Merge git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
author Linus Torvalds <torvalds@g5.osdl.org>
Tue, 17 Oct 2006 15:56:43 +0000 (08:56 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Tue, 17 Oct 2006 15:56:43 +0000 (08:56 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input:
  Input: fm801-gp - handle errors from pci_enable_device()
  Input: gameport core - handle errors returned by device_bind_driver()
  Input: serio core - handle errors returned by device_bind_driver()
  Lockdep: fix compile error in drivers/input/serio/serio.c
  Input: serio - add lockdep annotations
  Lockdep: add lockdep_set_class_and_subclass() and lockdep_set_subclass()
  Input: atkbd - suppress "too many keys" error message
  Input: i8042 - suppress ACK/NAKs when blinking during panic
  Input: add missing exports to fix modular build
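
A note on the lockdep items in the shortlog above: the new
lockdep_set_class_and_subclass() and lockdep_set_subclass() helpers let
nested locks that share one lock class (such as chained serio ports) be
tagged with distinct subclasses, so lockdep no longer reports the
parent-then-child acquisition as recursive locking.  A minimal sketch of
the pattern follows; the 'port' structure, its 'depth' field, and the
init helper are illustrative stand-ins, not the exact serio code.

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct port {
            spinlock_t lock;
            unsigned int depth;     /* 0 at the root, parent->depth + 1 below */
    };

    /* Give each nesting level its own lockdep subclass so taking
     * parent->lock and then child->lock is not flagged as recursion
     * within a single class. */
    static void port_init(struct port *p, struct port *parent)
    {
            p->depth = parent ? parent->depth + 1 : 0;
            spin_lock_init(&p->lock);
            lockdep_set_subclass(&p->lock, p->depth);
    }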

584 files changed:
Documentation/filesystems/00-INDEX
Documentation/filesystems/ext4.txt [new file with mode: 0644]
Documentation/ibm-acpi.txt
Documentation/lockdep-design.txt
Documentation/sysctl/kernel.txt
Documentation/video4linux/CARDLIST.cx88
MAINTAINERS
Makefile
arch/alpha/kernel/alpha_ksyms.c
arch/alpha/kernel/core_irongate.c
arch/alpha/kernel/irq_alpha.c
arch/alpha/kernel/pci-noop.c
arch/alpha/kernel/pci_iommu.c
arch/alpha/kernel/process.c
arch/alpha/kernel/setup.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/time.c
arch/alpha/mm/numa.c
arch/arm/kernel/armksyms.c
arch/arm/mach-versatile/core.c
arch/arm/mach-versatile/pci.c
arch/arm/vfp/vfpmodule.c
arch/arm26/kernel/armksyms.c
arch/avr32/kernel/time.c
arch/avr32/mach-at32ap/extint.c
arch/avr32/mach-at32ap/intc.c
arch/i386/Kconfig.cpu
arch/i386/kernel/acpi/boot.c
arch/i386/kernel/acpi/cstate.c
arch/i386/kernel/apm.c
arch/i386/kernel/cpu/mcheck/therm_throt.c
arch/i386/kernel/i8253.c
arch/i386/kernel/i8259.c
arch/i386/kernel/io_apic.c
arch/i386/kernel/irq.c
arch/i386/kernel/microcode.c
arch/i386/kernel/process.c
arch/i386/kernel/setup.c
arch/i386/kernel/syscall_table.S
arch/i386/kernel/tsc.c
arch/i386/lib/usercopy.c
arch/i386/mach-voyager/voyager_basic.c
arch/i386/mach-voyager/voyager_smp.c
arch/i386/mm/discontig.c
arch/ia64/mm/contig.c
arch/ia64/mm/discontig.c
arch/m32r/kernel/setup.c
arch/m32r/kernel/setup_mappi.c
arch/m32r/kernel/signal.c
arch/m32r/kernel/smp.c
arch/m32r/kernel/sys_m32r.c
arch/m32r/kernel/traps.c
arch/m68k/kernel/m68k_ksyms.c
arch/m68k/kernel/process.c
arch/m68k/kernel/setup.c
arch/m68k/kernel/traps.c
arch/m68k/mm/kmap.c
arch/m68k/mm/memory.c
arch/m68k/mm/sun3kmap.c
arch/m68k/sun3/Makefile
arch/m68k/sun3/idprom.c
arch/m68k/sun3/sun3_ksyms.c [deleted file]
arch/m68k/sun3/sun3dvma.c
arch/m68knommu/kernel/syscalltable.S
arch/mips/Makefile
arch/mips/configs/bigsur_defconfig
arch/mips/jazz/setup.c
arch/mips/kernel/smp.c
arch/mips/sgi-ip27/ip27-klnuma.c
arch/mips/sibyte/bcm1480/smp.c
arch/parisc/kernel/parisc_ksyms.c
arch/powerpc/configs/mpc834x_itx_defconfig
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/traps.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/83xx/Kconfig
arch/powerpc/platforms/83xx/Makefile
arch/powerpc/platforms/83xx/mpc8360e_pb.c
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spufs/file.c
arch/ppc/kernel/time.c
arch/ppc/mm/init.c
arch/ppc/platforms/mpc8272ads_setup.c
arch/ppc/platforms/mpc866ads_setup.c
arch/ppc/platforms/mpc885ads_setup.c
arch/s390/appldata/appldata_base.c
arch/s390/kernel/s390_ext.c
arch/s390/kernel/s390_ksyms.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/vtime.c
arch/sh/Kconfig
arch/sh/boards/hp6xx/hp6xx_apm.c
arch/sh/boards/landisk/landisk_pwb.c
arch/sh/boards/mpc1211/setup.c
arch/sh/boards/renesas/r7780rp/irq.c
arch/sh/boards/snapgear/setup.c
arch/sh/cchips/hd6446x/hd64461/setup.c
arch/sh/cchips/hd6446x/hd64465/gpio.c
arch/sh/cchips/hd6446x/hd64465/setup.c
arch/sh/cchips/voyagergx/irq.c
arch/sh/drivers/dma/dma-g2.c
arch/sh/drivers/dma/dma-pvr2.c
arch/sh/drivers/dma/dma-sh.c
arch/sh/drivers/pci/pci-sh7751.c
arch/sh/drivers/pci/pci-st40.c
arch/sh/kernel/cpu/irq/intc2.c
arch/sh/kernel/cpu/irq/ipr.c
arch/sh/kernel/cpu/sh3/ex.S
arch/sh/kernel/cpu/sh4/ex.S
arch/sh/kernel/entry.S
arch/sh/kernel/irq.c
arch/sh/kernel/process.c
arch/sh/kernel/time.c
arch/sh/kernel/timers/timer-tmu.c
arch/sh/mm/consistent.c
arch/sparc/kernel/pcic.c
arch/sparc/kernel/setup.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/mm/srmmu.c
arch/sparc64/kernel/setup.c
arch/um/Kconfig
arch/um/Kconfig.i386
arch/um/Makefile-x86_64
arch/um/include/common-offsets.h
arch/um/include/kern_util.h
arch/um/include/longjmp.h
arch/um/include/os.h
arch/um/include/sysdep-i386/kernel-offsets.h
arch/um/include/sysdep-x86_64/kernel-offsets.h
arch/um/kernel/skas/mmu.c
arch/um/kernel/tt/uaccess_user.c
arch/um/os-Linux/tt.c
arch/um/os-Linux/util.c
arch/um/sys-x86_64/ksyms.c
arch/um/sys-x86_64/stub_segv.c
arch/x86_64/kernel/i8259.c
arch/x86_64/kernel/io_apic.c
arch/x86_64/kernel/irq.c
arch/x86_64/kernel/process.c
arch/x86_64/kernel/vsmp.c
arch/x86_64/mm/init.c
arch/x86_64/mm/numa.c
block/elevator.c
drivers/Kconfig
drivers/acpi/asus_acpi.c
drivers/acpi/battery.c
drivers/acpi/ec.c
drivers/acpi/events/evmisc.c
drivers/acpi/events/evrgnini.c
drivers/acpi/ibm_acpi.c
drivers/acpi/motherboard.c
drivers/acpi/osl.c
drivers/acpi/pci_link.c
drivers/acpi/power.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/sbs.c
drivers/acpi/tables/tbget.c
drivers/acpi/tables/tbrsdt.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/pata_qdi.c
drivers/ata/sata_promise.c
drivers/block/DAC960.h
drivers/block/amiflop.c
drivers/block/rd.c
drivers/block/xd.c
drivers/block/z2ram.c
drivers/bluetooth/bcm203x.c
drivers/char/Kconfig
drivers/char/epca.c
drivers/char/ip2/i2lib.c
drivers/char/ip2/i2lib.h
drivers/char/ip2/ip2main.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/mem.c
drivers/char/rio/rioctrl.c
drivers/char/serial167.c
drivers/char/specialix.c
drivers/char/sx.c
drivers/char/synclink.c
drivers/char/tpm/tpm.c
drivers/char/tpm/tpm_atmel.c
drivers/char/tpm/tpm_nsc.c
drivers/eisa/eisa-bus.c
drivers/firmware/dell_rbu.c
drivers/firmware/efivars.c
drivers/ide/ide-cd.c
drivers/ide/ide-taskfile.c
drivers/ide/pci/generic.c
drivers/ide/pci/sgiioc4.c
drivers/input/misc/hp_sdc_rtc.c
drivers/input/misc/wistron_btns.c
drivers/input/serio/hil_mlc.c
drivers/input/serio/hp_sdc.c
drivers/isdn/capi/capidrv.c
drivers/isdn/hisax/config.c
drivers/isdn/hysdn/boardergo.c
drivers/isdn/hysdn/hysdn_defs.h
drivers/isdn/hysdn/hysdn_proclog.c
drivers/isdn/hysdn/hysdn_sched.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/icn/icn.c
drivers/isdn/isdnloop/isdnloop.c
drivers/isdn/isdnloop/isdnloop.h
drivers/isdn/pcbit/drv.c
drivers/isdn/pcbit/layer2.c
drivers/isdn/sc/init.c
drivers/isdn/sc/packet.c
drivers/isdn/sc/shmem.c
drivers/leds/led-class.c
drivers/leds/ledtrig-timer.c
drivers/mca/mca-bus.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/media/dvb/bt8xx/dvb-bt8xx.c
drivers/media/dvb/dvb-core/Kconfig
drivers/media/dvb/dvb-usb/dibusb-common.c
drivers/media/dvb/dvb-usb/dibusb.h
drivers/media/dvb/dvb-usb/nova-t-usb2.c
drivers/media/dvb/frontends/dib3000mc.c
drivers/media/dvb/frontends/dib3000mc.h
drivers/media/dvb/frontends/tda10086.h
drivers/media/dvb/frontends/tda826x.h
drivers/media/video/Kconfig
drivers/media/video/cx25840/cx25840-vbi.c
drivers/media/video/cx88/cx88-cards.c
drivers/media/video/cx88/cx88-dvb.c
drivers/media/video/cx88/cx88-input.c
drivers/media/video/et61x251/et61x251_core.c
drivers/media/video/ov511.c
drivers/media/video/pwc/pwc-if.c
drivers/media/video/saa7115.c
drivers/media/video/saa7134/saa7134-video.c
drivers/media/video/sn9c102/sn9c102_core.c
drivers/media/video/stv680.c
drivers/media/video/tuner-types.c
drivers/media/video/videodev.c
drivers/media/video/vivi.c
drivers/message/i2o/bus-osm.c
drivers/message/i2o/exec-osm.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/ioc4.c [new file with mode: 0644]
drivers/misc/msi-laptop.c [new file with mode: 0644]
drivers/net/b44.c
drivers/net/bonding/bond_alb.c
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/ehea/ehea_phyp.c
drivers/net/eth16i.c
drivers/net/forcedeth.c
drivers/net/ibmveth.c
drivers/net/mv643xx_eth.c
drivers/net/skge.c
drivers/net/skge.h
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/smc91x.h
drivers/net/spider_net.c
drivers/net/spider_net.h
drivers/net/spider_net_ethtool.c
drivers/net/sun3_82586.c
drivers/net/sun3lance.c
drivers/net/tulip/de2104x.c
drivers/pci/Kconfig
drivers/rtc/rtc-max6902.c
drivers/rtc/rtc-sh.c
drivers/rtc/rtc-v3020.c
drivers/s390/char/monwriter.c
drivers/s390/cio/chsc.c
drivers/s390/cio/cio.c
drivers/s390/cio/css.c
drivers/s390/cio/css.h
drivers/s390/cio/device.c
drivers/s390/cio/device.h
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_id.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/device_pgid.c
drivers/s390/cio/device_status.c
drivers/s390/cio/qdio.c
drivers/sbus/char/bbc_envctrl.c
drivers/sbus/char/envctrl.c
drivers/scsi/aha152x.c
drivers/scsi/dtc.c
drivers/scsi/fdomain.c
drivers/scsi/megaraid/megaraid_mbox.c
drivers/scsi/seagate.c
drivers/scsi/t128.c
drivers/scsi/wd7000.c
drivers/serial/ioc4_serial.c
drivers/serial/sh-sci.c
drivers/serial/sunzilog.c
drivers/sn/Kconfig
drivers/sn/Makefile
drivers/sn/ioc4.c [deleted file]
drivers/video/Kconfig
drivers/video/nvidia/nv_i2c.c
drivers/w1/Kconfig
fs/Kconfig
fs/Makefile
fs/afs/dir.c
fs/autofs4/autofs_i.h
fs/autofs4/init.c
fs/autofs4/inode.c
fs/autofs4/waitq.c
fs/binfmt_elf.c
fs/bio.c
fs/buffer.c
fs/cifs/cifsacl.h
fs/cifs/cifsencrypt.h
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/md5.c
fs/cifs/md5.h
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smbdes.c
fs/cifs/smbencrypt.c
fs/compat_ioctl.c
fs/dcache.c
fs/dlm/Kconfig
fs/dlm/lowcomms.c
fs/eventpoll.c
fs/ext2/super.c
fs/ext3/super.c
fs/ext4/Makefile [new file with mode: 0644]
fs/ext4/acl.c [new file with mode: 0644]
fs/ext4/acl.h [new file with mode: 0644]
fs/ext4/balloc.c [new file with mode: 0644]
fs/ext4/bitmap.c [new file with mode: 0644]
fs/ext4/dir.c [new file with mode: 0644]
fs/ext4/extents.c [new file with mode: 0644]
fs/ext4/file.c [new file with mode: 0644]
fs/ext4/fsync.c [new file with mode: 0644]
fs/ext4/hash.c [new file with mode: 0644]
fs/ext4/ialloc.c [new file with mode: 0644]
fs/ext4/inode.c [new file with mode: 0644]
fs/ext4/ioctl.c [new file with mode: 0644]
fs/ext4/namei.c [new file with mode: 0644]
fs/ext4/namei.h [new file with mode: 0644]
fs/ext4/resize.c [new file with mode: 0644]
fs/ext4/super.c [new file with mode: 0644]
fs/ext4/symlink.c [new file with mode: 0644]
fs/ext4/xattr.c [new file with mode: 0644]
fs/ext4/xattr.h [new file with mode: 0644]
fs/ext4/xattr_security.c [new file with mode: 0644]
fs/ext4/xattr_trusted.c [new file with mode: 0644]
fs/ext4/xattr_user.c [new file with mode: 0644]
fs/fat/inode.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/locking/dlm/mount.c
fs/gfs2/log.c
fs/gfs2/lops.c
fs/gfs2/ops_address.c
fs/gfs2/rgrp.h
fs/hugetlbfs/inode.c
fs/ioprio.c
fs/jbd/journal.c
fs/jbd2/Makefile [new file with mode: 0644]
fs/jbd2/checkpoint.c [new file with mode: 0644]
fs/jbd2/commit.c [new file with mode: 0644]
fs/jbd2/journal.c [new file with mode: 0644]
fs/jbd2/recovery.c [new file with mode: 0644]
fs/jbd2/revoke.c [new file with mode: 0644]
fs/jbd2/transaction.c [new file with mode: 0644]
fs/jffs2/super.c
fs/lockd/svc4proc.c
fs/lockd/svcproc.c
fs/lockd/svcsubs.c
fs/minix/inode.c
fs/nfsd/lockd.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4proc.c
fs/ocfs2/super.c
fs/partitions/check.c
fs/proc/base.c
fs/reiserfs/super.c
fs/splice.c
fs/super.c
fs/sysv/super.c
include/acpi/pdc_intel.h
include/acpi/processor.h
include/asm-alpha/io.h
include/asm-arm/arch-versatile/hardware.h
include/asm-arm/io.h
include/asm-arm/uaccess.h
include/asm-avr32/irq_regs.h [new file with mode: 0644]
include/asm-frv/highmem.h
include/asm-frv/io.h
include/asm-generic/bitops/sched.h
include/asm-i386/io.h
include/asm-i386/processor.h
include/asm-i386/uaccess.h
include/asm-i386/unistd.h
include/asm-i386/vic.h
include/asm-i386/voyager.h
include/asm-m32r/io.h
include/asm-m68k/sun3mmu.h
include/asm-m68k/uaccess.h
include/asm-m68knommu/unistd.h
include/asm-mips/io.h
include/asm-mips/irq.h
include/asm-mips/stackframe.h
include/asm-mips/termbits.h
include/asm-powerpc/io.h
include/asm-ppc/io.h
include/asm-s390/cio.h
include/asm-s390/timer.h
include/asm-sh/cpu-sh4/ubc.h
include/asm-sh/hw_irq.h
include/asm-sh/io.h
include/asm-sh/irq.h
include/asm-sh/irq_regs.h [new file with mode: 0644]
include/asm-sh/timer.h
include/asm-sh64/io.h
include/asm-sparc64/io.h
include/asm-x86_64/io.h
include/asm-x86_64/processor.h
include/linux/acpi.h
include/linux/bitmap.h
include/linux/blkdev.h
include/linux/buffer_head.h
include/linux/carta_random32.h [new file with mode: 0644]
include/linux/compat_ioctl.h
include/linux/cpumask.h
include/linux/dcache.h
include/linux/elevator.h
include/linux/ext4_fs.h [new file with mode: 0644]
include/linux/ext4_fs_extents.h [new file with mode: 0644]
include/linux/ext4_fs_i.h [new file with mode: 0644]
include/linux/ext4_fs_sb.h [new file with mode: 0644]
include/linux/ext4_jbd2.h [new file with mode: 0644]
include/linux/fs.h
include/linux/hugetlb.h
include/linux/io.h
include/linux/irq.h
include/linux/jbd2.h [new file with mode: 0644]
include/linux/lockd/bind.h
include/linux/lockd/xdr.h
include/linux/magic.h
include/linux/mm.h
include/linux/module.h
include/linux/nbd.h
include/linux/net.h
include/linux/nodemask.h
include/linux/random.h
include/linux/security.h
include/linux/sunrpc/msg_prot.h
include/linux/sunrpc/xdr.h
include/linux/syscalls.h
include/linux/videodev2.h
include/net/bluetooth/hci_core.h
include/net/flow.h
include/net/inet_timewait_sock.h
include/net/inetpeer.h
include/net/netlabel.h
include/net/sctp/sctp.h
include/net/sctp/ulpevent.h
include/net/timewait_sock.h
include/net/xfrm.h
kernel/cpu.c
kernel/fork.c
kernel/irq/chip.c
kernel/irq/proc.c
kernel/lockdep.c
kernel/module.c
kernel/posix-cpu-timers.c
kernel/power/disk.c
kernel/power/swap.c
kernel/power/user.c
kernel/printk.c
kernel/profile.c
kernel/sched.c
kernel/sys_ni.c
kernel/time/jiffies.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/bitmap.c
lib/random32.c [new file with mode: 0644]
mm/hugetlb.c
mm/mempolicy.c
mm/mmap.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/shmem_acl.c
mm/truncate.c
mm/vmalloc.c
mm/vmscan.c
net/bluetooth/af_bluetooth.c
net/bluetooth/bnep/core.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/sock.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bluetooth/hidp/core.c
net/bluetooth/hidp/sock.c
net/bluetooth/l2cap.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/rfcomm/tty.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_private.h
net/bridge/br_stp_if.c
net/compat.c
net/core/dev.c
net/core/flow.c
net/core/rtnetlink.c
net/core/scm.c
net/core/utils.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/decnet/af_decnet.c
net/decnet/dn_route.c
net/ipv4/cipso_ipv4.c
net/ipv4/inetpeer.c
net/ipv4/ip_gre.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_conntrack_netlink.c
net/ipv4/netfilter/ipt_ECN.c
net/ipv4/netfilter/ipt_TOS.c
net/ipv4/route.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/xfrm4_policy.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/fib6_rules.c
net/ipv6/ndisc.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_policy.c
net/key/af_key.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_netlink.c
net/netfilter/xt_NFQUEUE.c
net/netfilter/xt_connmark.c
net/netlabel/netlabel_kapi.c
net/sched/sch_htb.c
net/sctp/proc.c
net/sctp/socket.c
net/sctp/ulpevent.c
net/sctp/ulpqueue.c
net/sunrpc/svc.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/Makefile.headersinst
scripts/Makefile.modpost
scripts/kconfig/lxdialog/dialog.h
scripts/kernel-doc
security/dummy.c
security/selinux/include/xfrm.h
security/selinux/ss/ebitmap.c
security/selinux/ss/mls.c
security/selinux/ss/policydb.c
security/selinux/ss/services.c
security/selinux/xfrm.c

diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 3c384c0cf86e0a6484e121fdc817c4307dcbce21..4dc28cc935037c3b9c8f7cdc4dbb26854fbc2ea8 100644
@@ -34,6 +34,8 @@ ext2.txt
        - info, mount options and specifications for the Ext2 filesystem.
 ext3.txt
        - info, mount options and specifications for the Ext3 filesystem.
+ext4.txt
+       - info, mount options and specifications for the Ext4 filesystem.
 files.txt
        - info on file management in the Linux kernel.
 fuse.txt
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
new file mode 100644
index 0000000..6a4adca
--- /dev/null
+++ b/Documentation/filesystems/ext4.txt
@@ -0,0 +1,236 @@
+
+Ext4 Filesystem
+===============
+
+This is a development version of the ext4 filesystem, an advanced level
+of the ext3 filesystem which incorporates scalability and reliability
+enhancements for supporting large filesystems (64 bit) in keeping with
+increasing disk capacities and state-of-the-art feature requirements.
+
+Mailing list: linux-ext4@vger.kernel.org
+
+
+1. Quick usage instructions:
+===========================
+
+  - Grab updated e2fsprogs from
+    ftp://ftp.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs-interim/
+    This is a patchset on top of e2fsprogs-1.39, which can be found at
+    ftp://ftp.kernel.org/pub/linux/kernel/people/tytso/e2fsprogs/
+
+  - It's still mke2fs -j /dev/hda1
+
+  - mount /dev/hda1 /wherever -t ext4dev
+
+  - To enable extents,
+
+       mount /dev/hda1 /wherever -t ext4dev -o extents
+
+  - The filesystem is compatible with the ext3 driver until you add a file
+    which has extents (i.e. `mount -o extents', then create a file).
+
+    NOTE: The "extents" mount flag is temporary.  It will soon go away and
+    extents will be enabled by the "-o extents" flag to mke2fs or tune2fs
+
+  - When comparing performance with other filesystems, remember that
+    ext3/4 by default offers higher data integrity guarantees than most.  So
+    when comparing with a metadata-only journalling filesystem, use `mount -o
+    data=writeback'.  And you might as well use `mount -o nobh' too along
+    with it.  Making the journal larger than the mke2fs default often helps
+    performance with metadata-intensive workloads.
+
+2. Features
+===========
+
+2.1 Currently available
+
+* ability to use filesystems > 16TB
+* extent format reduces metadata overhead (RAM, IO for access, transactions)
+* extent format more robust in face of on-disk corruption due to magics,
+* internal redundancy in tree
+
+2.2 Previously available, soon to be enabled by default by "mkfs.ext4":
+
+* dir_index and resize inode will be on by default
+* large inodes will be used by default for fast EAs, nsec timestamps, etc
+
+2.3 Candidate features for future inclusion
+
+There are several under discussion; whether they all make it in is
+partly a function of how much time everyone has to work on them:
+
+* improved file allocation (multi-block alloc, delayed alloc; basically done)
+* fix 32000 subdirectory limit (patch exists, needs some e2fsck work)
+* nsec timestamps for mtime, atime, ctime, create time (patch exists,
+  needs some e2fsck work)
+* inode version field on disk (NFSv4, Lustre; prototype exists)
+* reduced mke2fs/e2fsck time via uninitialized groups (prototype exists)
+* journal checksumming for robustness, performance (prototype exists)
+* persistent file preallocation (e.g. for streaming media, databases)
+
+Features like metadata checksumming have been discussed and planned for
+a bit but no patches exist yet so I'm not sure they're in the near-term
+roadmap.
+
+The big performance win will come with mballoc and delalloc.  CFS has
+been using mballoc for a few years already with Lustre, and IBM + Bull
+did a lot of benchmarking on it.  The reason it isn't in the first set of
+patches is partly a manageability issue, and partly because it doesn't
+directly affect the on-disk format (outside of much better allocation)
+so it isn't critical to get into the first round of changes.  I believe
+Alex is working on a new set of patches right now.
+
+3. Options
+==========
+
+When mounting an ext4 filesystem, the following options are accepted:
+(*) == default
+
+extents                        ext4 will use extents to address file data.  The
+                       file system will no longer be mountable by ext3.
+
+journal=update         Update the ext4 file system's journal to the current
+                       format.
+
+journal=inum           When a journal already exists, this option is ignored.
+                       Otherwise, it specifies the number of the inode which
+                       will represent the ext4 file system's journal file.
+
+journal_dev=devnum     When the external journal device's major/minor numbers
+                       have changed, this option allows the user to specify
+                       the new journal location.  The journal device is
+                       identified through its new major/minor numbers encoded
+                       in devnum.
+
+noload                 Don't load the journal on mounting.
+
+data=journal           All data are committed into the journal prior to being
+                       written into the main file system.
+
+data=ordered   (*)     All data are forced directly out to the main file
+                       system prior to its metadata being committed to the
+                       journal.
+
+data=writeback         Data ordering is not preserved, data may be written
+                       into the main file system after its metadata has been
+                       committed to the journal.
+
+commit=nrsec   (*)     Ext4 can be told to sync all its data and metadata
+                       every 'nrsec' seconds. The default value is 5 seconds.
+                       This means that if you lose your power, you will lose
+                       as much as the latest 5 seconds of work (your
+                       filesystem will not be damaged though, thanks to the
+                       journaling).  This default value (or any low value)
+                       will hurt performance, but it's good for data-safety.
+                       Setting it to 0 will have the same effect as leaving
+                       it at the default (5 seconds).
+                       Setting it to very large values will improve
+                       performance.
+
+barrier=1              This enables/disables barriers.  barrier=0 disables
+                       it, barrier=1 enables it.
+
+orlov          (*)     This enables the new Orlov block allocator. It is
+                       enabled by default.
+
+oldalloc               This disables the Orlov block allocator and enables
+                       the old block allocator.  Orlov should have better
+                       performance - we'd like to get some feedback if it's
+                       the contrary for you.
+
+user_xattr             Enables Extended User Attributes.  Additionally, you
+                       need to have extended attribute support enabled in the
+                       kernel configuration (CONFIG_EXT4_FS_XATTR).  See the
+                       attr(5) manual page and http://acl.bestbits.at/ to
+                       learn more about extended attributes.
+
+nouser_xattr           Disables Extended User Attributes.
+
+acl                    Enables POSIX Access Control Lists support.
+                       Additionally, you need to have ACL support enabled in
+                       the kernel configuration (CONFIG_EXT4_FS_POSIX_ACL).
+                       See the acl(5) manual page and http://acl.bestbits.at/
+                       for more information.
+
+noacl                  This option disables POSIX Access Control List
+                       support.
+
+reservation
+
+noreservation
+
+bsddf          (*)     Make 'df' act like BSD.
+minixdf                        Make 'df' act like Minix.
+
+check=none             Don't do extra checking of bitmaps on mount.
+nocheck
+
+debug                  Extra debugging information is sent to syslog.
+
+errors=remount-ro(*)   Remount the filesystem read-only on an error.
+errors=continue                Keep going on a filesystem error.
+errors=panic           Panic and halt the machine if an error occurs.
+
+grpid                  Give objects the same group ID as their creator.
+bsdgroups
+
+nogrpid                (*)     New objects have the group ID of their creator.
+sysvgroups
+
+resgid=n               The group ID which may use the reserved blocks.
+
+resuid=n               The user ID which may use the reserved blocks.
+
+sb=n                   Use alternate superblock at this location.
+
+quota
+noquota
+grpquota
+usrquota
+
+bh             (*)     ext4 associates buffer heads to data pages to
+nobh                   (a) cache disk block mapping information
+                       (b) link pages into transaction to provide
+                           ordering guarantees.
+                       "bh" option forces use of buffer heads.
+                       "nobh" option tries to avoid associating buffer
+                       heads (supported only for "writeback" mode).
+
+
+Data Mode
+---------
+There are 3 different data modes:
+
+* writeback mode
+In data=writeback mode, ext4 does not journal data at all.  This mode provides
+a level of journaling similar to that of XFS, JFS, and ReiserFS in its default
+mode - metadata journaling.  A crash+recovery can cause incorrect data to
+appear in files which were written shortly before the crash.  This mode will
+typically provide the best ext4 performance.
+
+* ordered mode
+In data=ordered mode, ext4 only officially journals metadata, but it logically
+groups metadata and data blocks into a single unit called a transaction.  When
+it's time to write the new metadata out to disk, the associated data blocks
+are written first.  In general, this mode performs slightly slower than
+writeback but significantly faster than journal mode.
+
+* journal mode
+data=journal mode provides full data and metadata journaling.  All new data is
+written to the journal first, and then to its final location.
+In the event of a crash, the journal can be replayed, bringing both data and
+metadata into a consistent state.  This mode is the slowest except when data
+needs to be read from and written to disk at the same time, where it
+outperforms all other modes.
+
+References
+==========
+
+kernel source: <file:fs/ext4/>
+               <file:fs/jbd2/>
+
+programs:      http://e2fsprogs.sourceforge.net/
+               http://ext2resize.sourceforge.net
+
+useful links:  http://fedoraproject.org/wiki/ext3-devel
+               http://www.bullopensource.org/ext4/
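
As a companion to the quick-usage steps in the ext4.txt file added above,
the same mount can be expressed through the mount(2) syscall; a minimal
sketch only, reusing the document's placeholder device /dev/hda1 and an
assumed mount point /mnt.

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* equivalent of: mount /dev/hda1 /mnt -t ext4dev -o extents */
            if (mount("/dev/hda1", "/mnt", "ext4dev", 0, "extents") != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }
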
diff --git a/Documentation/ibm-acpi.txt b/Documentation/ibm-acpi.txt
index 71aa403452722348f2a1668db3b6b35699e833df..e50595bfd8ea63e0cfc15c0c4cadd94e01ea1224 100644
@@ -30,9 +30,10 @@ detailed description):
        - ACPI sounds
        - temperature sensors
        - Experimental: embedded controller register dump
-       - Experimental: LCD brightness control
-       - Experimental: volume control
+       - LCD brightness control
+       - Volume control
        - Experimental: fan speed, fan enable/disable
+       - Experimental: WAN enable and disable
 
 A compatibility table by model and feature is maintained on the web
 site, http://ibm-acpi.sf.net/. I appreciate any success or failure
@@ -52,40 +53,7 @@ Installation
 
 If you are compiling this driver as included in the Linux kernel
 sources, simply enable the CONFIG_ACPI_IBM option (Power Management /
-ACPI / IBM ThinkPad Laptop Extras). The rest of this section describes
-how to install this driver when downloaded from the web site.
-
-First, you need to get a kernel with ACPI support up and running.
-Please refer to http://acpi.sourceforge.net/ for help with this
-step. How successful you will be depends a lot on you ThinkPad model,
-the kernel you are using and any additional patches applied. The
-kernel provided with your distribution may not be good enough. I
-needed to compile a 2.6.7 kernel with the 20040715 ACPI patch to get
-ACPI working reliably on my ThinkPad X40. Old ThinkPad models may not
-be supported at all.
-
-Assuming you have the basic ACPI support working (e.g. you can see the
-/proc/acpi directory), follow the following steps to install this
-driver:
-
-       - unpack the archive:
-
-               tar xzvf ibm-acpi-x.y.tar.gz; cd ibm-acpi-x.y
-
-       - compile the driver:
-
-               make
-
-       - install the module in your kernel modules directory:
-
-               make install
-
-       - load the module:
-
-               modprobe ibm_acpi
-
-After loading the module, check the "dmesg" output for any error messages.
-
+ACPI / IBM ThinkPad Laptop Extras).
 
 Features
 --------
@@ -523,13 +491,8 @@ registers contain the current battery capacity, etc. If you experiment
 with this, do send me your results (including some complete dumps with
 a description of the conditions when they were taken.)
 
-EXPERIMENTAL: LCD brightness control -- /proc/acpi/ibm/brightness
------------------------------------------------------------------
-
-This feature is marked EXPERIMENTAL because the implementation
-directly accesses hardware registers and may not work as expected. USE
-WITH CAUTION! To use this feature, you need to supply the
-experimental=1 parameter when loading the module.
+LCD brightness control -- /proc/acpi/ibm/brightness
+---------------------------------------------------
 
 This feature allows software control of the LCD brightness on ThinkPad
 models which don't have a hardware brightness slider. The available
@@ -542,13 +505,8 @@ commands are:
 The <level> number range is 0 to 7, although not all of them may be
 distinct. The current brightness level is shown in the file.
 
-EXPERIMENTAL: Volume control -- /proc/acpi/ibm/volume
------------------------------------------------------
-
-This feature is marked EXPERIMENTAL because the implementation
-directly accesses hardware registers and may not work as expected. USE
-WITH CAUTION! To use this feature, you need to supply the
-experimental=1 parameter when loading the module.
+Volume control -- /proc/acpi/ibm/volume
+---------------------------------------
 
 This feature allows volume control on ThinkPad models which don't have
 a hardware volume knob. The available commands are:
@@ -611,6 +569,23 @@ with the following command:
 
        echo 'level <level>' > /proc/acpi/ibm/thermal
 
+EXPERIMENTAL: WAN -- /proc/acpi/ibm/wan
+---------------------------------------
+
+This feature is marked EXPERIMENTAL because the implementation
+directly accesses hardware registers and may not work as expected. USE
+WITH CAUTION! To use this feature, you need to supply the
+experimental=1 parameter when loading the module.
+
+This feature shows the presence and current state of a WAN (Sierra
+Wireless EV-DO) device. If WAN is installed, the following commands can
+be used:
+
+       echo enable > /proc/acpi/ibm/wan
+       echo disable > /proc/acpi/ibm/wan
+
+It was tested on a Lenovo ThinkPad X60. It should probably work on other
+ThinkPad models which come with this module installed.
 
 Multiple Commands, Module Parameters
 ------------------------------------
diff --git a/Documentation/lockdep-design.txt b/Documentation/lockdep-design.txt
index dab123db5a4fed62b324b9ffe2624fc04c0b5846..488773018152056ea159685e732e42452a7ae142 100644
@@ -50,10 +50,10 @@ The bit position indicates hardirq, softirq, hardirq-read,
 softirq-read respectively, and the character displayed in each
 indicates:
 
-   '.'  acquired while irqs enabled
+   '.'  acquired while irqs disabled
    '+'  acquired in irq context
-   '-'  acquired in process context with irqs disabled
-   '?'  read-acquired both with irqs enabled and in irq context
+   '-'  acquired with irqs enabled
+   '?'  read acquired in irq context with irqs enabled.
 
 Unused mutexes cannot be part of the cause of an error.
 
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 89bf8c20a5860787bc27fc2a5836fd5e0833a051..0bc7f1e3c9e6aa2e340bec8f512684351630fc9f 100644
@@ -86,7 +86,7 @@ valid for 30 seconds.
 core_pattern:
 
 core_pattern is used to specify a core dumpfile pattern name.
-. max length 64 characters; default value is "core"
+. max length 128 characters; default value is "core"
 . core_pattern is used as a pattern template for the output filename;
   certain string patterns (beginning with '%') are substituted with
   their actual values.
@@ -105,6 +105,9 @@ core_pattern is used to specify a core dumpfile pattern name.
        %h      hostname
        %e      executable filename
        %<OTHER> both are dropped
+. If the first character of the pattern is a '|', the kernel will treat
+  the rest of the pattern as a command to run.  The core dump will be
+  written to the standard input of that program instead of to a file.
 
 ==============================================================
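
The '|' behaviour documented above hands the core dump to a user-space
helper on its standard input.  A hedged sketch of such a helper follows;
the install path, output directory, and passing %e as argv[1] are
assumptions for illustration, e.g. registered with:
echo '|/usr/local/bin/core-catch %e' > /proc/sys/kernel/core_pattern

    #include <stdio.h>

    /* The kernel execs this program and streams the core image to stdin;
     * copy it to a file named after the crashing executable. */
    int main(int argc, char **argv)
    {
            char path[256];
            unsigned char buf[4096];
            size_t n;
            FILE *out;

            snprintf(path, sizeof(path), "/var/tmp/core.%s",
                     argc > 1 ? argv[1] : "unknown");
            out = fopen(path, "wb");
            if (!out)
                    return 1;
            while ((n = fread(buf, 1, sizeof(buf), stdin)) > 0)
                    fwrite(buf, 1, n, out);
            fclose(out);
            return 0;
    }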
 
diff --git a/Documentation/video4linux/CARDLIST.cx88 b/Documentation/video4linux/CARDLIST.cx88
index 126e59d935cd2348f162fc06aefe2538478584c4..8755b3e7b09e500446d99800e8abc90df96c2b85 100644
@@ -51,7 +51,7 @@
  50 -> NPG Tech Real TV FM Top 10                          [14f1:0842]
  51 -> WinFast DTV2000 H                                   [107d:665e]
  52 -> Geniatech DVB-S                                     [14f1:0084]
- 53 -> Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T  [0070:1404]
+ 53 -> Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T  [0070:1404,0070:1400,0070:1401,0070:1402]
  54 -> Norwood Micro TV Tuner
  55 -> Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM  [c180:c980]
  56 -> Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder   [0070:9600,0070:9601,0070:9602]
diff --git a/MAINTAINERS b/MAINTAINERS
index 931e6e40c08b4b468b6504993813f498a7507f16..5305dd69095b3b5e7605c6dd3539168d43cc4e77 100644
@@ -905,7 +905,8 @@ P:  David Teigland
 M:     teigland@redhat.com
 L:     cluster-devel@redhat.com
 W:     http://sources.redhat.com/cluster/
-T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
+T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes.git
+T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw.git
 S:     Supported
 
 DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
@@ -1188,7 +1189,8 @@ P:        Steven Whitehouse
 M:     swhiteho@redhat.com
 L:     cluster-devel@redhat.com
 W:     http://sources.redhat.com/cluster/
-T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
+T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-fixes.git
+T:     git kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw.git
 S:     Supported
 
 GIGASET ISDN DRIVERS
@@ -1996,6 +1998,13 @@ M:       rubini@ipvvis.unipv.it
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
+MSI LAPTOP SUPPORT
+P:     Lennart Poettering
+M:     mzxreary@0pointer.de
+L:     https://tango.0pointer.de/mailman/listinfo/s270-linux
+W:     http://0pointer.de/lennart/tchibo.html
+S:     Maintained
+
 MTRR AND SIMILAR SUPPORT [i386]
 P:     Richard Gooch
 M:     rgooch@atnf.csiro.au
@@ -2003,8 +2012,11 @@ L:       linux-kernel@vger.kernel.org
 W:     http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
 S:     Maintained
 
-MULTIMEDIA CARD (MMC) SUBSYSTEM
-S:     Orphan
+MULTIMEDIA CARD (MMC) AND SECURE DIGITAL (SD) SUBSYSTEM
+P:     Pierre Ossman
+M:     drzeus-mmc@drzeus.cx
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
 
 MULTISOUND SOUND DRIVER
 P:     Andrew Veliath
@@ -2040,11 +2052,13 @@ P:      Marc Boucher
 P:     James Morris
 P:     Harald Welte
 P:     Jozsef Kadlecsik
-M:     coreteam@netfilter.org
+P:     Patrick McHardy
+M:     kaber@trash.net
+L:     netfilter-devel@lists.netfilter.org
+L:     netfilter@lists.netfilter.org
+L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
-L:     netfilter@lists.netfilter.org
-L:     netfilter-devel@lists.netfilter.org
 S:     Supported
 
 NETLABEL
diff --git a/Makefile b/Makefile
index 274b780029b19edf4a8e01c747a19fb1f9711602..62a1343cf327c84e0afae0128b7d705c9330966b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 19
-EXTRAVERSION =-rc1
+EXTRAVERSION =-rc2
 NAME=Avast! A bilge rat!
 
 # *DOCUMENTATION*
@@ -741,6 +741,9 @@ endif # ifdef CONFIG_KALLSYMS
 
 # vmlinux image - including updated kernel symbols
 vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) $(kallsyms.o) FORCE
+ifdef CONFIG_HEADERS_CHECK
+       $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
+endif
        $(call if_changed_rule,vmlinux__)
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $@
        $(Q)rm -f .old_version
@@ -932,7 +935,7 @@ headers_install_all: include/linux/version.h scripts_basic FORCE
 
 PHONY += headers_install
 headers_install: include/linux/version.h scripts_basic FORCE
-       @if [ ! -r include/asm-$(ARCH)/Kbuild ]; then \
+       @if [ ! -r $(srctree)/include/asm-$(ARCH)/Kbuild ]; then \
          echo '*** Error: Headers not exportable for this architecture ($(ARCH))'; \
          exit 1 ; fi
        $(Q)$(MAKE) $(build)=scripts scripts/unifdef
@@ -1316,7 +1319,8 @@ define xtags
            $(all-sources) | xargs $1 -a \
                -I __initdata,__exitdata,__acquires,__releases \
                -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
-               --extra=+f --c-kinds=+px; \
+               --extra=+f --c-kinds=+px \
+               --regex-asm='/ENTRY\(([^)]*)\).*/\1/'; \
            $(all-kconfigs) | xargs $1 -a \
                --langdef=kconfig \
                --language-force=kconfig \
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index 8b02420f732eb8e1df10db233b2444d9881470b7..e9762a33b0439b3d70d451954f3bc790c40cb0f2 100644
@@ -6,40 +6,13 @@
  */
 
 #include <linux/module.h>
-#include <linux/string.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/socket.h>
-#include <linux/syscalls.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/pci.h>
-#include <linux/screen_info.h>
-#include <linux/tty.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/io.h>
 #include <asm/console.h>
-#include <asm/hwrpb.h>
 #include <asm/uaccess.h>
-#include <asm/processor.h>
 #include <asm/checksum.h>
-#include <linux/interrupt.h>
 #include <asm/fpu.h>
-#include <asm/irq.h>
 #include <asm/machvec.h>
-#include <asm/pgalloc.h>
-#include <asm/semaphore.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/vga.h>
 
-#include <asm/unistd.h>
-
-extern struct hwrpb_struct *hwrpb;
-extern spinlock_t rtc_lock;
+#include <linux/syscalls.h>
 
 /* these are C runtime functions with special calling conventions: */
 extern void __divl (void);
@@ -52,14 +25,9 @@ extern void __divqu (void);
 extern void __remqu (void);
 
 EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(screen_info);
-EXPORT_SYMBOL(perf_irq);
 EXPORT_SYMBOL(callback_getenv);
 EXPORT_SYMBOL(callback_setenv);
 EXPORT_SYMBOL(callback_save_env);
-#ifdef CONFIG_ALPHA_GENERIC
-EXPORT_SYMBOL(alpha_using_srm);
-#endif /* CONFIG_ALPHA_GENERIC */
 
 /* platform dependent support */
 EXPORT_SYMBOL(strcat);
@@ -77,47 +45,14 @@ EXPORT_SYMBOL(__constant_c_memset);
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
-EXPORT_SYMBOL(__direct_map_base);
-EXPORT_SYMBOL(__direct_map_size);
-
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-EXPORT_SYMBOL(pci_map_single);
-EXPORT_SYMBOL(pci_map_page);
-EXPORT_SYMBOL(pci_unmap_single);
-EXPORT_SYMBOL(pci_unmap_page);
-EXPORT_SYMBOL(pci_map_sg);
-EXPORT_SYMBOL(pci_unmap_sg);
-EXPORT_SYMBOL(pci_dma_supported);
-EXPORT_SYMBOL(pci_dac_dma_supported);
-EXPORT_SYMBOL(pci_dac_page_to_dma);
-EXPORT_SYMBOL(pci_dac_dma_to_page);
-EXPORT_SYMBOL(pci_dac_dma_to_offset);
-EXPORT_SYMBOL(alpha_gendev_to_pci);
-#endif
-EXPORT_SYMBOL(dma_set_mask);
-
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(dump_elf_thread);
-EXPORT_SYMBOL(dump_elf_task);
-EXPORT_SYMBOL(dump_elf_task_fp);
-EXPORT_SYMBOL(hwrpb);
-EXPORT_SYMBOL(start_thread);
 EXPORT_SYMBOL(alpha_read_fp_reg);
 EXPORT_SYMBOL(alpha_read_fp_reg_s);
 EXPORT_SYMBOL(alpha_write_fp_reg);
 EXPORT_SYMBOL(alpha_write_fp_reg_s);
 
-/* In-kernel system calls.  */
+/* entry.S */
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(sys_dup);
-EXPORT_SYMBOL(sys_exit);
-EXPORT_SYMBOL(sys_write);
-EXPORT_SYMBOL(sys_lseek);
 EXPORT_SYMBOL(kernel_execve);
-EXPORT_SYMBOL(sys_setsid);
-EXPORT_SYMBOL(sys_wait4);
 
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_tcpudp_magic);
@@ -134,10 +69,6 @@ EXPORT_SYMBOL(alpha_fp_emul_imprecise);
 EXPORT_SYMBOL(alpha_fp_emul);
 #endif
 
-#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
-EXPORT_SYMBOL(__min_ipl);
-#endif
-
 /*
  * The following are specially called from the uaccess assembly stubs.
  */
@@ -160,26 +91,9 @@ EXPORT_SYMBOL(up);
  */
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(flush_tlb_mm);
-EXPORT_SYMBOL(flush_tlb_range);
-EXPORT_SYMBOL(flush_tlb_page);
-EXPORT_SYMBOL(smp_imb);
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(smp_num_cpus);
-EXPORT_SYMBOL(smp_call_function);
-EXPORT_SYMBOL(smp_call_function_on_cpu);
 EXPORT_SYMBOL(_atomic_dec_and_lock);
 #endif /* CONFIG_SMP */
 
-/*
- * NUMA specific symbols
- */
-#ifdef CONFIG_DISCONTIGMEM
-EXPORT_SYMBOL(node_data);
-#endif /* CONFIG_DISCONTIGMEM */
-
-EXPORT_SYMBOL(rtc_lock);
-
 /*
  * The following are special because they're not called
  * explicitly (the C compiler or assembler generates them in
@@ -200,8 +114,3 @@ EXPORT_SYMBOL(__remqu);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memchr);
-
-#ifdef CONFIG_ALPHA_IRONGATE
-EXPORT_SYMBOL(irongate_ioremap);
-EXPORT_SYMBOL(irongate_iounmap);
-#endif
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index 138d497d1cca8b37e4b043776cf28d784cff0ce4..e4a0bcf1d28b6834492808c0cf52b4681e3b0182 100644
@@ -404,6 +404,7 @@ irongate_ioremap(unsigned long addr, unsigned long size)
 #endif
        return (void __iomem *)vaddr;
 }
+EXPORT_SYMBOL(irongate_ioremap);
 
 void
 irongate_iounmap(volatile void __iomem *xaddr)
@@ -414,3 +415,4 @@ irongate_iounmap(volatile void __iomem *xaddr)
        if (addr)
                return vfree((void *)(PAGE_MASK & addr)); 
 }
+EXPORT_SYMBOL(irongate_iounmap);
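
This and the following alpha hunks repeat one refactoring: EXPORT_SYMBOL()
moves out of the central alpha_ksyms.c and sits directly under each
symbol's definition, keeping export and definition in sync and letting
alpha_ksyms.c drop most of its #includes.  In miniature, with an
illustrative function name:

    #include <linux/module.h>

    /* Before: foo() defined here but exported far away in a *_ksyms.c
     * file.  After: the export lives next to the definition. */
    int foo(int x)
    {
            return x + 1;
    }
    EXPORT_SYMBOL(foo);
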
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 6dd126b8be858111ebcacb5336f57e5289845df9..e16aeb6e79ef8cd62955f27d7fd623cacfd79cca 100644
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/irq.h>
 #include <linux/kernel_stat.h>
+#include <linux/module.h>
 
 #include <asm/machvec.h>
 #include <asm/dma.h>
@@ -16,6 +17,7 @@
 /* Hack minimum IPL during interrupt processing for broken hardware.  */
 #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
 int __min_ipl;
+EXPORT_SYMBOL(__min_ipl);
 #endif
 
 /*
@@ -30,6 +32,7 @@ dummy_perf(unsigned long vector, struct pt_regs *regs)
 }
 
 void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
+EXPORT_SYMBOL(perf_irq);
 
 /*
  * The main interrupt entry point.
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index fff5cf93e8164a6a465f5c538d16350d271307e0..174b729c504b274084c10bd78ff9e4465f3a9cda 100644
@@ -201,6 +201,7 @@ dma_set_mask(struct device *dev, u64 mask)
 
        return 0;
 }
+EXPORT_SYMBOL(dma_set_mask);
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index c468e312e5f815bd67b19c7354bf65422ea51d4b..6e7d1fe6e93532daa923ae6d6b69839e639954b6 100644
@@ -300,6 +300,7 @@ pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; 
        return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
 }
+EXPORT_SYMBOL(pci_map_single);
 
 dma_addr_t
 pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
@@ -314,6 +315,7 @@ pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
        return pci_map_single_1(pdev, (char *)page_address(page) + offset, 
                                size, dac_allowed);
 }
+EXPORT_SYMBOL(pci_map_page);
 
 /* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
    SIZE must match what was provided for in a previous pci_map_single
@@ -379,6 +381,7 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
        DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
              dma_addr, size, npages, __builtin_return_address(0));
 }
+EXPORT_SYMBOL(pci_unmap_single);
 
 void
 pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
@@ -386,6 +389,7 @@ pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
 {
        pci_unmap_single(pdev, dma_addr, size, direction);
 }
+EXPORT_SYMBOL(pci_unmap_page);
 
 /* Allocate and map kernel buffer using consistent mode DMA for PCI
    device.  Returns non-NULL cpu-view pointer to the buffer if
@@ -427,6 +431,7 @@ try_again:
 
        return cpu_addr;
 }
+EXPORT_SYMBOL(pci_alloc_consistent);
 
 /* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
    be values that were returned from pci_alloc_consistent.  SIZE must
@@ -444,7 +449,7 @@ pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
        DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
              dma_addr, size, __builtin_return_address(0));
 }
-
+EXPORT_SYMBOL(pci_free_consistent);
 
 /* Classify the elements of the scatterlist.  Write dma_address
    of each element with:
@@ -672,6 +677,7 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
                pci_unmap_sg(pdev, start, out - start, direction);
        return 0;
 }
+EXPORT_SYMBOL(pci_map_sg);
 
 /* Unmap a set of streaming mode DMA translations.  Again, cpu read
    rules concerning calls here are the same as for pci_unmap_single()
@@ -752,6 +758,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
 
        DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
 }
+EXPORT_SYMBOL(pci_unmap_sg);
 
 
 /* Return whether the given PCI device DMA address mask can be
@@ -786,6 +793,7 @@ pci_dma_supported(struct pci_dev *pdev, u64 mask)
 
        return 0;
 }
+EXPORT_SYMBOL(pci_dma_supported);
 
 \f
 /*
@@ -908,6 +916,7 @@ pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
 
        return ok;
 }
+EXPORT_SYMBOL(pci_dac_dma_supported);
 
 dma64_addr_t
 pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
@@ -917,6 +926,7 @@ pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
                + __pa(page_address(page)) 
                + (dma64_addr_t) offset);
 }
+EXPORT_SYMBOL(pci_dac_page_to_dma);
 
 struct page *
 pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
@@ -924,13 +934,14 @@ pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
        unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
        return virt_to_page(__va(paddr));
 }
+EXPORT_SYMBOL(pci_dac_dma_to_page);
 
 unsigned long
 pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
 {
        return (dma_addr & ~PAGE_MASK);
 }
-
+EXPORT_SYMBOL(pci_dac_dma_to_offset);
 
 /* Helper for generic DMA-mapping functions. */
 
@@ -957,6 +968,7 @@ alpha_gendev_to_pci(struct device *dev)
        /* This assumes ISA bus master with dma_mask 0xffffff. */
        return NULL;
 }
+EXPORT_SYMBOL(alpha_gendev_to_pci);
 
 int
 dma_set_mask(struct device *dev, u64 mask)
@@ -969,3 +981,4 @@ dma_set_mask(struct device *dev, u64 mask)
 
        return 0;
 }
+EXPORT_SYMBOL(dma_set_mask);
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index b3a8a29803654eab232f478d7a1c58a9a483c1ee..3370e6faeae022d5209e1bb05188959823e0a944 100644
@@ -205,6 +205,7 @@ start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
        regs->ps = 8;
        wrusp(sp);
 }
+EXPORT_SYMBOL(start_thread);
 
 /*
  * Free current thread data structures etc..
@@ -376,6 +377,7 @@ dump_thread(struct pt_regs * pt, struct user * dump)
        dump->regs[EF_A2]  = pt->r18;
        memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8);
 }
+EXPORT_SYMBOL(dump_thread);
 
 /*
  * Fill in the user structure for a ELF core dump.
@@ -424,6 +426,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
           useful value of the thread's UNIQUE field.  */
        dest[32] = ti->pcb.unique;
 }
+EXPORT_SYMBOL(dump_elf_thread);
 
 int
 dump_elf_task(elf_greg_t *dest, struct task_struct *task)
@@ -431,6 +434,7 @@ dump_elf_task(elf_greg_t *dest, struct task_struct *task)
        dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
        return 1;
 }
+EXPORT_SYMBOL(dump_elf_task);
 
 int
 dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
@@ -439,6 +443,7 @@ dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
        memcpy(dest, sw->fp, 32 * 8);
        return 1;
 }
+EXPORT_SYMBOL(dump_elf_task_fp);
 
 /*
  * sys_execve() executes a new program.
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index a94e6d93e2eedb695f83e718a2fd3eba1b674807..1aea7c7c683cddb0d5b7e065f1e08f720d70a8e0 100644
@@ -66,6 +66,7 @@ static struct notifier_block alpha_panic_block = {
 
 
 struct hwrpb_struct *hwrpb;
+EXPORT_SYMBOL(hwrpb);
 unsigned long srm_hae;
 
 int alpha_l1i_cacheshape;
@@ -111,6 +112,7 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
 #ifdef CONFIG_ALPHA_GENERIC
 struct alpha_machine_vector alpha_mv;
 int alpha_using_srm;
+EXPORT_SYMBOL(alpha_using_srm);
 #endif
 
 static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
@@ -137,6 +139,8 @@ struct screen_info screen_info = {
        .orig_video_points = 16
 };
 
+EXPORT_SYMBOL(screen_info);
+
 /*
  * The direct map I/O window, if any.  This should be the same
  * for all busses, since it's used by virt_to_bus.
@@ -144,6 +148,8 @@ struct screen_info screen_info = {
 
 unsigned long __direct_map_base;
 unsigned long __direct_map_size;
+EXPORT_SYMBOL(__direct_map_base);
+EXPORT_SYMBOL(__direct_map_size);
 
 /*
  * Declare all of the machine vectors.
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 596780e2c7dace8bb40ea36b9411a1dd28223b7c..d1ec4f51df1aae6c3678dc85d393edb2d33ee321 100644
@@ -52,6 +52,7 @@
 
 /* A collection of per-processor data.  */
 struct cpuinfo_alpha cpu_data[NR_CPUS];
+EXPORT_SYMBOL(cpu_data);
 
 /* A collection of single bit ipi messages.  */
 static struct {
@@ -74,6 +75,7 @@ EXPORT_SYMBOL(cpu_online_map);
 
 int smp_num_probed;            /* Internal processor count */
 int smp_num_cpus = 1;          /* Number that came online.  */
+EXPORT_SYMBOL(smp_num_cpus);
 
 extern void calibrate_delay(void);
 
@@ -790,6 +792,7 @@ smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
 
        return 0;
 }
+EXPORT_SYMBOL(smp_call_function_on_cpu);
 
 int
 smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
@@ -797,6 +800,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
        return smp_call_function_on_cpu (func, info, retry, wait,
                                         cpu_online_map);
 }
+EXPORT_SYMBOL(smp_call_function);
 
 static void
 ipi_imb(void *ignored)
@@ -811,6 +815,7 @@ smp_imb(void)
        if (on_each_cpu(ipi_imb, NULL, 1, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
 }
+EXPORT_SYMBOL(smp_imb);
 
 static void
 ipi_flush_tlb_all(void *ignored)
@@ -866,6 +871,7 @@ flush_tlb_mm(struct mm_struct *mm)
 
        preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_mm);
 
 struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
@@ -918,6 +924,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 
        preempt_enable();
 }
+EXPORT_SYMBOL(flush_tlb_page);
 
 void
 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
@@ -925,6 +932,7 @@ flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long e
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(vma->vm_mm);
 }
+EXPORT_SYMBOL(flush_tlb_range);
 
 static void
 ipi_flush_icache_page(void *x)
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index cf066652398952d0e9d14746e0865c5f42239c1e..d7053eb4ffcfd93623d58aabda82d30cf9118dee 100644
@@ -57,6 +57,7 @@
 static int set_rtc_mmss(unsigned long);
 
 DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
 
 #define TICK_SIZE (tick_nsec / 1000)
 
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index b826f58c6e7247e5f73a45039a1493c9b7c90f60..e3e3806a6f254f02b3cec0d1a02fba5fc05639c2 100644
 #include <linux/swap.h>
 #include <linux/initrd.h>
 #include <linux/pfn.h>
+#include <linux/module.h>
 
 #include <asm/hwrpb.h>
 #include <asm/pgalloc.h>
 
 pg_data_t node_data[MAX_NUMNODES];
 bootmem_data_t node_bdata[MAX_NUMNODES];
+EXPORT_SYMBOL(node_data);
 
 #undef DEBUG_DISCONTIG
 #ifdef DEBUG_DISCONTIG
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index da69e660574bf1510f8f1457d07c7368b7de12e2..4779f474f9113ac0a87aa0ae41ac332b02033484 100644
@@ -178,9 +178,3 @@ EXPORT_SYMBOL(_find_next_zero_bit_be);
 EXPORT_SYMBOL(_find_first_bit_be);
 EXPORT_SYMBOL(_find_next_bit_be);
 #endif
-
-       /* syscalls */
-EXPORT_SYMBOL(sys_write);
-EXPORT_SYMBOL(sys_lseek);
-EXPORT_SYMBOL(sys_exit);
-EXPORT_SYMBOL(sys_wait4);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 2aa150b57ba109066c7c969170eacad562d0e554..3b8576111c16db0a8a9e425cacfa89e2fd5ac572 100644
@@ -188,12 +188,12 @@ static struct map_desc versatile_io_desc[] __initdata = {
                .length         = SZ_4K,
                .type           = MT_DEVICE
        }, {
-               .virtual        =  VERSATILE_PCI_VIRT_BASE,
+               .virtual        =  (unsigned long)VERSATILE_PCI_VIRT_BASE,
                .pfn            = __phys_to_pfn(VERSATILE_PCI_BASE),
                .length         = VERSATILE_PCI_BASE_SIZE,
                .type           = MT_DEVICE
        }, {
-               .virtual        =  VERSATILE_PCI_CFG_VIRT_BASE,
+               .virtual        =  (unsigned long)VERSATILE_PCI_CFG_VIRT_BASE,
                .pfn            = __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
                .length         = VERSATILE_PCI_CFG_BASE_SIZE,
                .type           = MT_DEVICE
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 13bbd08ff841d2c006bd99646488193a7a7c619b..5cd0b5d9e7ebbf9e8935bb996c1748ab4d2722ce 100644
  * Cfg   42000000 - 42FFFFFF     PCI config
  *
  */
-#define SYS_PCICTL                     IO_ADDRESS(VERSATILE_SYS_PCICTL)
-#define PCI_IMAP0                      IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
-#define PCI_IMAP1                      IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
-#define PCI_IMAP2                      IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
-#define PCI_SMAP0                      IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
-#define PCI_SMAP1                      IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
-#define PCI_SMAP2                      IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
-#define PCI_SELFID                     IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
+#define __IO_ADDRESS(n) ((void __iomem *)(unsigned long)IO_ADDRESS(n))
+#define SYS_PCICTL             __IO_ADDRESS(VERSATILE_SYS_PCICTL)
+#define PCI_IMAP0              __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
+#define PCI_IMAP1              __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
+#define PCI_IMAP2              __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
+#define PCI_SMAP0              __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
+#define PCI_SMAP1              __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
+#define PCI_SMAP2              __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
+#define PCI_SELFID             __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
 
 #define DEVICE_ID_OFFSET               0x00
 #define CSR_OFFSET                     0x04
@@ -76,7 +77,7 @@ static int __init versatile_pci_slot_ignore(char *str)
 __setup("pci_slot_ignore=", versatile_pci_slot_ignore);
 
 
-static unsigned long __pci_addr(struct pci_bus *bus,
+static void __iomem *__pci_addr(struct pci_bus *bus,
                                unsigned int devfn, int offset)
 {
        unsigned int busnr = bus->number;
@@ -91,14 +92,14 @@ static unsigned long __pci_addr(struct pci_bus *bus,
        if (devfn > 255)
                BUG();
 
-       return (VERSATILE_PCI_CFG_VIRT_BASE | (busnr << 16) |
+       return VERSATILE_PCI_CFG_VIRT_BASE + ((busnr << 16) |
                (PCI_SLOT(devfn) << 11) | (PCI_FUNC(devfn) << 8) | offset);
 }
 
 static int versatile_read_config(struct pci_bus *bus, unsigned int devfn, int where,
                                 int size, u32 *val)
 {
-       unsigned long addr = __pci_addr(bus, devfn, where);
+       void __iomem *addr = __pci_addr(bus, devfn, where & ~3);
        u32 v;
        int slot = PCI_SLOT(devfn);
 
@@ -121,13 +122,12 @@ static int versatile_read_config(struct pci_bus *bus, unsigned int devfn, int wh
                        break;
 
                case 2:
-                       v = __raw_readl(addr & ~3);
-                       if (addr & 2) v >>= 16;
+                       v = __raw_readl(addr);
+                       if (where & 2) v >>= 16;
                        v &= 0xffff;
                        break;
 
                default:
-                       addr &= ~3;
                        v = __raw_readl(addr);
                        break;
                }
@@ -140,7 +140,7 @@ static int versatile_read_config(struct pci_bus *bus, unsigned int devfn, int wh
 static int versatile_write_config(struct pci_bus *bus, unsigned int devfn, int where,
                                  int size, u32 val)
 {
-       unsigned long addr = __pci_addr(bus, devfn, where);
+       void __iomem *addr = __pci_addr(bus, devfn, where);
        int slot = PCI_SLOT(devfn);
 
        if (pci_slot_ignore & (1 << slot)) {
@@ -279,7 +279,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
        printk("PCI core found (slot %d)\n",myslot);
 
        __raw_writel(myslot, PCI_SELFID);
-       local_pci_cfg_base = (void *) VERSATILE_PCI_CFG_VIRT_BASE + (myslot << 11);
+       local_pci_cfg_base = VERSATILE_PCI_CFG_VIRT_BASE + (myslot << 11);
 
        val = __raw_readl(local_pci_cfg_base + CSR_OFFSET);
        val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
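
A quick sketch of what the new config-space cookie encodes (worked values
are illustrative, not part of the patch): the byte offset added to
VERSATILE_PCI_CFG_VIRT_BASE packs bus[23:16], slot[15:11], function[10:8]
and register[7:0], covering the whole 16MB window at 42000000:

        /* e.g. bus 1, devfn 0x10 (slot 2, function 0), register 0x10 */
        void __iomem *cfg = VERSATILE_PCI_CFG_VIRT_BASE +
                ((1 << 16) | (PCI_SLOT(0x10) << 11) |
                 (PCI_FUNC(0x10) << 8) | 0x10);         /* base + 0x11010 */

With the base typed void __iomem *, pointer addition replaces the old
bitwise OR and sparse can flag any direct dereference of the cookie.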
index dedbb449632edc1f65a40d39e06650df14831a98..a657a28f08dbbbf701acfe16e18a6b4350430948 100644 (file)
@@ -90,7 +90,7 @@ void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
 
        info.si_signo = SIGFPE;
        info.si_code = sicode;
-       info.si_addr = (void *)(instruction_pointer(regs) - 4);
+       info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
 
        /*
         * This is the same as NWFPE, because it's not clear what
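
The __user cast above is more than cosmetic: under sparse ("make C=1")
the annotation puts the pointer in a separate address space, so mixing
user, kernel and MMIO pointers without an explicit cast is reported at
check time. For background, the expansions are roughly the following
(paraphrased from <linux/compiler.h>; shown here as a sketch only):

        #ifdef __CHECKER__
        # define __user         __attribute__((noderef, address_space(1)))
        # define __iomem        __attribute__((noderef, address_space(2)))
        #else
        # define __user
        # define __iomem
        #endif

The void __iomem * conversions in the Versatile PCI hunks above rely on
the same mechanism.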
index 07907b6ecb634126b116236e3a92fcba632d4ec1..93293d04b3032c47f1181fb0576d34521e454e7b 100644 (file)
@@ -202,14 +202,6 @@ EXPORT_SYMBOL(_find_next_zero_bit_le);
 EXPORT_SYMBOL(elf_platform);
 EXPORT_SYMBOL(elf_hwcap);
 
-       /* syscalls */
-EXPORT_SYMBOL(sys_write);
-EXPORT_SYMBOL(sys_read);
-EXPORT_SYMBOL(sys_lseek);
-EXPORT_SYMBOL(sys_open);
-EXPORT_SYMBOL(sys_exit);
-EXPORT_SYMBOL(sys_wait4);
-
 #ifdef CONFIG_PREEMPT
 EXPORT_SYMBOL(kernel_flag);
 #endif
index 3e56b9f4358af4a3728c96ed2815eb033a0fc841..5a247ba71a72e1bdfcd036e3ca8840eb35fbfe08 100644 (file)
@@ -124,15 +124,15 @@ unsigned long long sched_clock(void)
  *
  * In UP mode, it is invoked from the (global) timer_interrupt.
  */
-static void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static void local_timer_interrupt(int irq, void *dev_id)
 {
        if (current->pid)
-               profile_tick(CPU_PROFILING, regs);
-       update_process_times(user_mode(regs));
+               profile_tick(CPU_PROFILING);
+       update_process_times(user_mode(get_irq_regs()));
 }
 
 static irqreturn_t
-timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+timer_interrupt(int irq, void *dev_id)
 {
        unsigned int count;
 
@@ -157,7 +157,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         *
         * SMP is not supported yet.
         */
-       local_timer_interrupt(irq, dev_id, regs);
+       local_timer_interrupt(irq, dev_id);
 
        return IRQ_HANDLED;
 }
index 7da9c5f7a0eb8cfd14419c0ff57b9ddcfd28b7b0..4dff1f98890039f0d96fcbc54e27c76322e3e383 100644 (file)
@@ -102,8 +102,7 @@ struct irq_chip eim_chip = {
        .set_type       = eim_set_irq_type,
 };
 
-static void demux_eim_irq(unsigned int irq, struct irq_desc *desc,
-                         struct pt_regs *regs)
+static void demux_eim_irq(unsigned int irq, struct irq_desc *desc)
 {
        struct at32_sm *sm = desc->handler_data;
        struct irq_desc *ext_desc;
@@ -121,7 +120,7 @@ static void demux_eim_irq(unsigned int irq, struct irq_desc *desc,
 
                ext_irq = i + sm->eim_first_irq;
                ext_desc = irq_desc + ext_irq;
-               ext_desc->handle_irq(ext_irq, ext_desc, regs);
+               ext_desc->handle_irq(ext_irq, ext_desc);
        }
 
        spin_unlock(&sm->lock);
index 74f8c9f2f03d20b0829c8155ba18d5cceb024186..eb87a18ad7b2f70c6a49a7595efeca4ac23706b1 100644 (file)
@@ -52,16 +52,19 @@ static struct intc intc0 = {
 asmlinkage void do_IRQ(int level, struct pt_regs *regs)
 {
        struct irq_desc *desc;
+       struct pt_regs *old_regs;
        unsigned int irq;
        unsigned long status_reg;
 
        local_irq_disable();
 
+       old_regs = set_irq_regs(regs);
+
        irq_enter();
 
        irq = intc_readl(&intc0, INTCAUSE0 - 4 * level);
        desc = irq_desc + irq;
-       desc->handle_irq(irq, desc, regs);
+       desc->handle_irq(irq, desc);
 
        /*
         * Clear all interrupt level masks so that we may handle
@@ -75,6 +78,8 @@ asmlinkage void do_IRQ(int level, struct pt_regs *regs)
        sysreg_write(SR, status_reg);
 
        irq_exit();
+
+       set_irq_regs(old_regs);
 }
 
 void __init init_IRQ(void)
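
The hunks above show the new contract for handlers that lost their
struct pt_regs argument: do_IRQ banks the register snapshot with
set_irq_regs(), and anything downstream that still needs it fetches it
via get_irq_regs(). A minimal sketch of a converted handler (not part
of the patch):

        #include <linux/interrupt.h>
        #include <asm/irq_regs.h>

        static irqreturn_t example_tick_handler(int irq, void *dev_id)
        {
                /* registers of the context this IRQ interrupted */
                struct pt_regs *regs = get_irq_regs();

                update_process_times(user_mode(regs));
                return IRQ_HANDLED;
        }

set_irq_regs() returns the previous per-CPU value, which is why do_IRQ
saves and restores old_regs around the dispatch; nested interrupts then
unwind correctly.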
index 21c9a4e71104356d7c83a98ca7a92eb8c08f5216..fc4f2abccf06fdfd426b20b8195f1ae25d873c53 100644 (file)
@@ -7,6 +7,7 @@ choice
 
 config M386
        bool "386"
+       depends on !UML
        ---help---
          This is the processor type of your CPU. This information is used for
          optimizing purposes. In order to compile a kernel that can run on
@@ -301,7 +302,7 @@ config X86_USE_PPRO_CHECKSUM
 
 config X86_USE_3DNOW
        bool
-       depends on MCYRIXIII || MK7 || MGEODE_LX
+       depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
        default y
 
 config X86_OOSTORE
index 92f79cdd9a48c97aebb8a207a211cf28e421707a..ab974ff970730ec48300a2db994d1c9628f07d00 100644 (file)
@@ -332,7 +332,7 @@ acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
 /*
  * Parse Interrupt Source Override for the ACPI SCI
  */
-static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void acpi_sci_ioapic_setup(u32 bus_irq, u32 gsi, u16 polarity, u16 trigger)
 {
        if (trigger == 0)       /* compatible SCI trigger is level */
                trigger = 3;
@@ -352,13 +352,13 @@ static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
         * If GSI is < 16, this will update its flags,
         * else it will create a new mp_irqs[] entry.
         */
-       mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+       mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
 
        /*
         * stash over-ride to indicate we've been here
         * and for later update of acpi_fadt
         */
-       acpi_sci_override_gsi = gsi;
+       acpi_sci_override_gsi = bus_irq;
        return;
 }
 
@@ -376,7 +376,7 @@ acpi_parse_int_src_ovr(acpi_table_entry_header * header,
        acpi_table_print_madt_entry(header);
 
        if (intsrc->bus_irq == acpi_fadt.sci_int) {
-               acpi_sci_ioapic_setup(intsrc->global_irq,
+               acpi_sci_ioapic_setup(intsrc->bus_irq, intsrc->global_irq,
                                      intsrc->flags.polarity,
                                      intsrc->flags.trigger);
                return 0;
@@ -879,7 +879,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
         * pretend we got one so we can set the SCI flags.
         */
        if (!acpi_sci_override_gsi)
-               acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
+               acpi_sci_ioapic_setup(acpi_fadt.sci_int, acpi_fadt.sci_int, 0, 0);
 
        /* Fill in identity legacy mapings where no override */
        mp_config_acpi_legacy_irqs();
index 25db49ef1770aa11ce148f1d35789e85bc125151..20563e52c62248853e9d4fbc9af48461c27696bd 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/cpu.h>
 
 #include <acpi/processor.h>
 #include <asm/acpi.h>
@@ -41,5 +42,124 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
                flags->bm_check = 1;
        }
 }
-
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
+
+/* The code below handles cstate entry with the monitor-mwait pair on Intel */
+
+struct cstate_entry_s {
+       struct {
+               unsigned int eax;
+               unsigned int ecx;
+       } states[ACPI_PROCESSOR_MAX_POWER];
+};
+static struct cstate_entry_s *cpu_cstate_entry;        /* per CPU ptr */
+
+static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];
+
+#define MWAIT_SUBSTATE_MASK    (0xf)
+#define MWAIT_SUBSTATE_SIZE    (4)
+
+#define CPUID_MWAIT_LEAF (5)
+#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
+#define CPUID5_ECX_INTERRUPT_BREAK     (0x2)
+
+#define MWAIT_ECX_INTERRUPT_BREAK      (0x1)
+
+#define NATIVE_CSTATE_BEYOND_HALT      (2)
+
+int acpi_processor_ffh_cstate_probe(unsigned int cpu,
+               struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+{
+       struct cstate_entry_s *percpu_entry;
+       struct cpuinfo_x86 *c = cpu_data + cpu;
+
+       cpumask_t saved_mask;
+       int retval;
+       unsigned int eax, ebx, ecx, edx;
+       unsigned int edx_part;
+       unsigned int cstate_type; /* C-state type and not ACPI C-state type */
+       unsigned int num_cstate_subtype;
+
+       if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
+               return -1;
+
+       if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
+               return -1;
+
+       percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+       percpu_entry->states[cx->index].eax = 0;
+       percpu_entry->states[cx->index].ecx = 0;
+
+       /* Make sure we are running on right CPU */
+       saved_mask = current->cpus_allowed;
+       retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
+       if (retval)
+               return -1;
+
+       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
+
+       /* Check whether this particular cx_type (in CST) is supported or not */
+       cstate_type = (cx->address >> MWAIT_SUBSTATE_SIZE) + 1;
+       edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
+       num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
+
+       retval = 0;
+       if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
+               retval = -1;
+               goto out;
+       }
+
+       /* mwait ecx extensions INTERRUPT_BREAK should be supported for C2/C3 */
+       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
+           !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
+               retval = -1;
+               goto out;
+       }
+       percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
+
+       /* Use the hint in CST */
+       percpu_entry->states[cx->index].eax = cx->address;
+
+       if (!mwait_supported[cstate_type]) {
+               mwait_supported[cstate_type] = 1;
+               printk(KERN_DEBUG "Monitor-Mwait will be used to enter C-%d "
+                      "state\n", cx->type);
+       }
+
+out:
+       set_cpus_allowed(current, saved_mask);
+       return retval;
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
+
+void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
+{
+       unsigned int cpu = smp_processor_id();
+       struct cstate_entry_s *percpu_entry;
+
+       percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
+       mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
+                             percpu_entry->states[cx->index].ecx);
+}
+EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
+
+static int __init ffh_cstate_init(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+       if (c->x86_vendor != X86_VENDOR_INTEL)
+               return -1;
+
+       cpu_cstate_entry = alloc_percpu(struct cstate_entry_s);
+       return 0;
+}
+
+static void __exit ffh_cstate_exit(void)
+{
+       if (cpu_cstate_entry) {
+               free_percpu(cpu_cstate_entry);
+               cpu_cstate_entry = NULL;
+       }
+}
+
+arch_initcall(ffh_cstate_init);
+__exitcall(ffh_cstate_exit);
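
A worked example of the probe arithmetic above (input values are
hypothetical): suppose CPUID.5 reports edx = 0x220, i.e. two C1 and two
C2 sub-states, and the _CST entry carries the FFH address 0x10 (C2,
sub-state 0):

        /*
         * cstate_type        = (0x10 >> MWAIT_SUBSTATE_SIZE) + 1  = 2
         * edx_part           = 0x220 >> (2 * MWAIT_SUBSTATE_SIZE) = 0x2
         * num_cstate_subtype = 0x2 & MWAIT_SUBSTATE_MASK          = 2
         *
         * 2 is not less than (0x10 & MWAIT_SUBSTATE_MASK) == 0, so the
         * state is accepted and the raw address 0x10 is stored as the
         * MWAIT eax hint for acpi_processor_ffh_cstate_enter().
         */

In short, the upper nibble of the hint names the C-state (minus one) and
the lower nibble the sub-state, matching the per-state sub-state counts
CPUID.5 returns in edx.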
index b42f2d914af3bb15eada724c854bc92581f47169..2af65858d3229b29cad60a7d25dc88a5a927bb5c 100644 (file)
@@ -540,11 +540,30 @@ static inline void apm_restore_cpus(cpumask_t mask)
  * Also, we KNOW that for the non error case of apm_bios_call, there
  * is no useful data returned in the low order 8 bits of eax.
  */
-#define APM_DO_CLI     \
-       if (apm_info.allow_ints) \
-               local_irq_enable(); \
-       else \
+
+static inline unsigned long __apm_irq_save(void)
+{
+       unsigned long flags;
+       local_save_flags(flags);
+       if (apm_info.allow_ints) {
+               if (irqs_disabled_flags(flags))
+                       local_irq_enable();
+       } else
+               local_irq_disable();
+
+       return flags;
+}
+
+#define apm_irq_save(flags) \
+       do { flags = __apm_irq_save(); } while (0)
+
+static inline void apm_irq_restore(unsigned long flags)
+{
+       if (irqs_disabled_flags(flags))
                local_irq_disable();
+       else if (irqs_disabled())
+               local_irq_enable();
+}
 
 #ifdef APM_ZERO_SEGS
 #      define APM_DECL_SEGS \
@@ -596,12 +615,11 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
        save_desc_40 = gdt[0x40 / 8];
        gdt[0x40 / 8] = bad_bios_desc;
 
-       local_save_flags(flags);
-       APM_DO_CLI;
+       apm_irq_save(flags);
        APM_DO_SAVE_SEGS;
        apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
        APM_DO_RESTORE_SEGS;
-       local_irq_restore(flags);
+       apm_irq_restore(flags);
        gdt[0x40 / 8] = save_desc_40;
        put_cpu();
        apm_restore_cpus(cpus);
@@ -640,12 +658,11 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
        save_desc_40 = gdt[0x40 / 8];
        gdt[0x40 / 8] = bad_bios_desc;
 
-       local_save_flags(flags);
-       APM_DO_CLI;
+       apm_irq_save(flags);
        APM_DO_SAVE_SEGS;
        error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
        APM_DO_RESTORE_SEGS;
-       local_irq_restore(flags);
+       apm_irq_restore(flags);
        gdt[0x40 / 8] = save_desc_40;
        put_cpu();
        apm_restore_cpus(cpus);
index 4f43047de40625ffabaf6125000eb5c59e535547..2d8703b7ce65ead94dc3d6d26cf37421ad87d236 100644 (file)
@@ -110,17 +110,15 @@ int therm_throt_process(int curr)
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device * sys_dev)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
 {
-       sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
-       return 0;
+       return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static __cpuinit int thermal_throttle_remove_dev(struct sys_device * sys_dev)
+static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
 {
-       sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
-       return 0;
+       return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
 }
 
 /* Mutex protecting device creation against CPU hotplug */
@@ -133,12 +131,14 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
 {
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;
+       int err;
 
        sys_dev = get_cpu_sysdev(cpu);
        mutex_lock(&therm_cpu_lock);
        switch (action) {
        case CPU_ONLINE:
-               thermal_throttle_add_dev(sys_dev);
+               err = thermal_throttle_add_dev(sys_dev);
+               WARN_ON(err);
                break;
        case CPU_DEAD:
                thermal_throttle_remove_dev(sys_dev);
@@ -157,6 +157,7 @@ static struct notifier_block thermal_throttle_cpu_notifier =
 static __init int thermal_throttle_init_device(void)
 {
        unsigned int cpu = 0;
+       int err;
 
        if (!atomic_read(&therm_throt_en))
                return 0;
@@ -167,8 +168,10 @@ static __init int thermal_throttle_init_device(void)
        mutex_lock(&therm_cpu_lock);
 #endif
        /* connect live CPUs to sysfs */
-       for_each_online_cpu(cpu)
-               thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+       for_each_online_cpu(cpu) {
+               err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
+               WARN_ON(err);
+       }
 #ifdef CONFIG_HOTPLUG_CPU
        mutex_unlock(&therm_cpu_lock);
 #endif
index 477b24daff539bf3d5a1ab6be6e10f6f10dedc22..9a0060b92e32ace2911ae839a9d8b710e7458ec4 100644 (file)
@@ -109,7 +109,7 @@ static struct clocksource clocksource_pit = {
 
 static int __init init_pit_clocksource(void)
 {
-       if (num_possible_cpus() > 4) /* PIT does not scale! */
+       if (num_possible_cpus() > 1) /* PIT does not scale! */
                return 0;
 
        clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
index d53eafb6daa70f99c35b9b937acdea5a2a861500..62996cd17084245dafd53bcfab9555c20cc38c60 100644 (file)
@@ -113,7 +113,8 @@ void make_8259A_irq(unsigned int irq)
 {
        disable_irq_nosync(irq);
        io_apic_irqs &= ~(1<<irq);
-       set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+       set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+                                     "XT");
        enable_irq(irq);
 }
 
@@ -369,8 +370,8 @@ void __init init_ISA_irqs (void)
                        /*
                         * 16 old-style INTA-cycle interrupts:
                         */
-                       set_irq_chip_and_handler(i, &i8259A_chip,
-                                                handle_level_irq);
+                       set_irq_chip_and_handler_name(i, &i8259A_chip,
+                                                     handle_level_irq, "XT");
                } else {
                        /*
                         * 'high' PCI IRQs filled in on demand
index cd082c36ca0368120e4da0b637028c9333d997c3..350192d6ab986f70314abb98e7e2a68a04d0b9e7 100644 (file)
@@ -1225,11 +1225,11 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
                        trigger == IOAPIC_LEVEL)
-               set_irq_chip_and_handler(irq, &ioapic_chip,
-                                        handle_fasteoi_irq);
+               set_irq_chip_and_handler_name(irq, &ioapic_chip,
+                                        handle_fasteoi_irq, "fasteoi");
        else
-               set_irq_chip_and_handler(irq, &ioapic_chip,
-                                        handle_edge_irq);
+               set_irq_chip_and_handler_name(irq, &ioapic_chip,
+                                        handle_edge_irq, "edge");
        set_intr_gate(vector, interrupt[irq]);
 }
 
@@ -2235,7 +2235,8 @@ static inline void check_timer(void)
        printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
 
        disable_8259A_irq(0);
-       set_irq_chip_and_handler(0, &lapic_chip, handle_fasteoi_irq);
+       set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
+                                     "fasteio");
        apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector);   /* Fixed mode */
        enable_8259A_irq(0);
 
@@ -2541,7 +2542,8 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
 
        write_msi_msg(irq, &msg);
 
-       set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+       set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
+                                     "edge");
 
        return 0;
 }
@@ -2594,7 +2596,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 }
 #endif
 
-static struct hw_interrupt_type ht_irq_chip = {
+static struct irq_chip ht_irq_chip = {
        .name           = "PCI-HT",
        .mask           = mask_ht_irq,
        .unmask         = unmask_ht_irq,
@@ -2636,7 +2638,8 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
                write_ht_irq_low(irq, low);
                write_ht_irq_high(irq, high);
 
-               set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+               set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+                                             handle_edge_irq, "edge");
        }
        return vector;
 }
index 8cfc7dbec7b9f006118d49f1d6c7c65e6ab82213..3201d421090a0d2cf5237f4d55ef070d41bc48b9 100644 (file)
@@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
-               seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
+               seq_printf(p, "-%-8s", irq_desc[i].name);
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
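
With the "-%-8s" format the flow-handler name registered through
set_irq_chip_and_handler_name() gets its own left-aligned column, so a
/proc/interrupts row now reads roughly like the illustrative line below
(the counts are invented):

          0:    1234567    XT-PIC-XT        timer

Printing irq_desc[i].name directly also avoids deriving the name from
the handler function pointer, as the removed handle_irq_name() call did.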
index 9b9479768d5ebcda920044781234b8aa2d4183d3..c4d0291b519f836db7cfaccdfb8f375ffb0751ea 100644 (file)
@@ -656,14 +656,18 @@ static struct attribute_group mc_attr_group = {
 
 static int mc_sysdev_add(struct sys_device *sys_dev)
 {
-       int cpu = sys_dev->id;
+       int err, cpu = sys_dev->id;
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
        if (!cpu_online(cpu))
                return 0;
+
        pr_debug("Microcode:CPU %d added\n", cpu);
        memset(uci, 0, sizeof(*uci));
-       sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+
+       err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
+       if (err)
+               return err;
 
        microcode_init_cpu(cpu);
        return 0;
index b0a07801d9df90abf71f923fa27e55269cacf319..57d375900afb0643cf64f10b52ed49462131d001 100644 (file)
@@ -236,20 +236,28 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
  * We execute MONITOR against need_resched and enter optimized wait state
  * through MWAIT. Whenever someone changes need_resched, we would be woken
  * up from MWAIT (without an IPI).
+ *
+ * Starting with Core Duo processors, MWAIT can take hints based on CPU
+ * capability.
  */
-static void mwait_idle(void)
+void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
-       local_irq_enable();
-
-       while (!need_resched()) {
+       if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
-               if (need_resched())
-                       break;
-               __mwait(0, 0);
+               if (!need_resched())
+                       __mwait(eax, ecx);
        }
 }
 
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+       local_irq_enable();
+       while (!need_resched())
+               mwait_idle_with_hints(0, 0);
+}
+
 void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
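
The refactor splits policy from mechanism: mwait_idle_with_hints() does
one guarded MONITOR/MWAIT round while the caller owns the loop and the
hint registers. A sketch of a hinted caller (illustrative only;
MWAIT_ECX_INTERRUPT_BREAK comes from the cstate.c hunk above, not from
this file):

        static void example_enter_cstate(unsigned long hint)
        {
                /* eax = C-state hint, ecx = break out on interrupt */
                mwait_idle_with_hints(hint, MWAIT_ECX_INTERRUPT_BREAK);
        }

Because need_resched() is re-checked after MONITOR arms the address, a
wakeup racing with idle entry is not lost; the MWAIT is simply skipped.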
index 000cf03751fe9fd88280ea6938837c3f49126cac..519e63c3c1306abb1a59b31e017d76e4e671b01d 100644 (file)
@@ -1083,16 +1083,15 @@ static unsigned long __init setup_memory(void)
 
 void __init zone_sizes_init(void)
 {
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+       max_zone_pfns[ZONE_DMA] =
+               virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-       unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-                       virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
-                       max_low_pfn,
-                       highend_pfn};
+       max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        add_active_range(0, 0, highend_pfn);
 #else
-       unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-                       virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
-                       max_low_pfn};
        add_active_range(0, 0, max_low_pfn);
 #endif
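
The memset form is deliberate (sketch below is illustrative, not from
the patch): positional initializers bind values to whichever zone sits
at each index, so configuring out CONFIG_HIGHMEM silently shifts them,
whereas indexed stores stay tied to the symbolic zone:

        static void __init zone_pfns_example(unsigned long dma_pfn,
                                             unsigned long normal_pfn)
        {
                unsigned long pfns[MAX_NR_ZONES];

                memset(pfns, 0, sizeof(pfns));  /* unused zones stay 0 */
                pfns[ZONE_DMA]    = dma_pfn;    /* index-safe everywhere */
                pfns[ZONE_NORMAL] = normal_pfn;
                free_area_init_nodes(pfns);
        }

The i386 NUMA and ia64 hunks below apply the same memset for the same
reason.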
 
index 7e639f78b0b9f274ea055ef42275506b262a769c..2697e9210e92773a28d5b2e9ea7174433465acef 100644 (file)
@@ -318,3 +318,4 @@ ENTRY(sys_call_table)
        .long sys_vmsplice
        .long sys_move_pages
        .long sys_getcpu
+       .long sys_epoll_pwait
index b8fa0a8b2e4733170d0b72cff576d7ddb8c750cd..fbc95828cd7493082aeeedc0396a762db056097b 100644 (file)
@@ -349,8 +349,8 @@ static int tsc_update_callback(void)
        int change = 0;
 
        /* check to see if we should switch to the safe clocksource: */
-       if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
-               clocksource_tsc.rating = 50;
+       if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
+               clocksource_tsc.rating = 0;
                clocksource_reselect();
                change = 1;
        }
@@ -461,7 +461,7 @@ static int __init init_tsc_clocksource(void)
                                                        clocksource_tsc.shift);
                /* lower the rating if we already know it's unstable: */
                if (check_tsc_unstable())
-                       clocksource_tsc.rating = 50;
+                       clocksource_tsc.rating = 0;
 
                init_timer(&verify_tsc_freq_timer);
                verify_tsc_freq_timer.function = verify_tsc_freq;
index 08502fc6d0cb8d0fc819682d26b499e5aab08efb..258df6b4d7d7646ec5bd7a9843c669be14327928 100644 (file)
@@ -179,7 +179,7 @@ __clear_user(void __user *to, unsigned long n)
 EXPORT_SYMBOL(__clear_user);
 
 /**
- * strlen_user: - Get the size of a string in user space.
+ * strnlen_user: - Get the size of a string in user space.
  * @s: The string to measure.
  * @n: The maximum valid length
  *
index c639d30d8bdc05678b445cdc7cd7d1899347a0f9..8fe7e4593d5fe7aca75b81d35bbb70863427bcb2 100644 (file)
@@ -44,7 +44,7 @@ struct voyager_SUS *voyager_SUS = NULL;
 
 #ifdef CONFIG_SMP
 static void
-voyager_dump(int dummy1, struct pt_regs *dummy2, struct tty_struct *dummy3)
+voyager_dump(int dummy1, struct tty_struct *dummy3)
 {
        /* get here via a sysrq */
        voyager_smp_dump();
@@ -166,7 +166,7 @@ voyager_memory_detect(int region, __u32 *start, __u32 *length)
  * off the timer tick to the SMP code, since the VIC doesn't have an
  * internal timer (The QIC does, but that's another story). */
 void
-voyager_timer_interrupt(struct pt_regs *regs)
+voyager_timer_interrupt(void)
 {
        if((jiffies & 0x3ff) == 0) {
 
@@ -202,7 +202,7 @@ voyager_timer_interrupt(struct pt_regs *regs)
                }
        }
 #ifdef CONFIG_SMP
-       smp_vic_timer_interrupt(regs);
+       smp_vic_timer_interrupt();
 #endif
 }
 
index d42422fc4af3bac44b6a7c4498d7ebfd381039c1..f3fea2ad50fea81c5a4a3decfcdfc52d041b5f61 100644 (file)
@@ -85,8 +85,8 @@ static int ack_QIC_CPI(__u8 cpi);
 static void ack_special_QIC_CPI(__u8 cpi);
 static void ack_VIC_CPI(__u8 cpi);
 static void send_CPI_allbutself(__u8 cpi);
-static void enable_vic_irq(unsigned int irq);
-static void disable_vic_irq(unsigned int irq);
+static void mask_vic_irq(unsigned int irq);
+static void unmask_vic_irq(unsigned int irq);
 static unsigned int startup_vic_irq(unsigned int irq);
 static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
@@ -205,15 +205,12 @@ ack_CPI(__u8 cpi)
 /* The VIC IRQ descriptors -- these look almost identical to the
  * 8259 IRQs except that masks and things must be kept per processor
  */
-static struct hw_interrupt_type vic_irq_type = {
-       .typename = "VIC-level",
-       .startup = startup_vic_irq,
-       .shutdown = disable_vic_irq,
-       .enable = enable_vic_irq,
-       .disable = disable_vic_irq,
-       .ack = before_handle_vic_irq,
-       .end = after_handle_vic_irq,
-       .set_affinity = set_vic_irq_affinity,
+static struct irq_chip vic_chip = {
+       .name           = "VIC",
+       .startup        = startup_vic_irq,
+       .mask           = mask_vic_irq,
+       .unmask         = unmask_vic_irq,
+       .set_affinity   = set_vic_irq_affinity,
 };
 
 /* used to count up as CPUs are brought on line (starts at 0) */
@@ -1144,9 +1141,9 @@ smp_apic_timer_interrupt(struct pt_regs *regs)
 fastcall void
 smp_qic_timer_interrupt(struct pt_regs *regs)
 {
-       ack_QIC_CPI(QIC_TIMER_CPI);
        struct pt_regs *old_regs = set_irq_regs(regs);
-       wrapper_smp_local_timer_interrupt(void);
+       ack_QIC_CPI(QIC_TIMER_CPI);
+       wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
 }
 
@@ -1270,12 +1267,10 @@ smp_send_stop(void)
 /* this function is triggered in time.c when a clock tick fires
  * we need to re-broadcast the tick to all CPUs */
 void
-smp_vic_timer_interrupt(struct pt_regs *regs)
+smp_vic_timer_interrupt(void)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
        send_CPI_allbutself(VIC_TIMER_CPI);
        smp_local_timer_interrupt();
-       set_irq_regs(old_regs);
 }
 
 /* local (per CPU) timer interrupt.  It does both profiling and
@@ -1310,7 +1305,7 @@ smp_local_timer_interrupt(void)
                                                per_cpu(prof_counter, cpu);
                }
 
-               update_process_times(user_mode_vm(irq_regs));
+               update_process_times(user_mode_vm(get_irq_regs()));
        }
 
        if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
@@ -1397,6 +1392,17 @@ setup_profiling_timer(unsigned int multiplier)
        return 0;
 }
 
+/* This is a bit of a mess, but forced on us by the genirq changes:
+ * there's no genirq handler that really does what voyager wants,
+ * so hack it up with the simple IRQ handler */
+static void fastcall
+handle_vic_irq(unsigned int irq, struct irq_desc *desc)
+{
+       before_handle_vic_irq(irq);
+       handle_simple_irq(irq, desc);
+       after_handle_vic_irq(irq);
+}
+
 
 /*  The CPIs are handled in the per cpu 8259s, so they must be
  *  enabled to be received: FIX: enabling the CPIs in the early
@@ -1433,7 +1439,7 @@ smp_intr_init(void)
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for(i = 0; i < 48; i++)
-               irq_desc[i].chip = &vic_irq_type;
+               set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
 }
 
 /* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
@@ -1531,7 +1537,7 @@ ack_VIC_CPI(__u8 cpi)
 static unsigned int
 startup_vic_irq(unsigned int irq)
 {
-       enable_vic_irq(irq);
+       unmask_vic_irq(irq);
 
        return 0;
 }
@@ -1558,7 +1564,7 @@ startup_vic_irq(unsigned int irq)
  *    adjust their masks accordingly.  */
 
 static void
-enable_vic_irq(unsigned int irq)
+unmask_vic_irq(unsigned int irq)
 {
        /* linux doesn't do processor-irq affinity, so enable on
         * all CPUs we know about */
@@ -1567,7 +1573,7 @@ enable_vic_irq(unsigned int irq)
        __u32 processorList = 0;
        unsigned long flags;
 
-       VDEBUG(("VOYAGER: enable_vic_irq(%d) CPU%d affinity 0x%lx\n",
+       VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
                irq, cpu, cpu_irq_affinity[cpu]));
        spin_lock_irqsave(&vic_irq_lock, flags);
        for_each_online_cpu(real_cpu) {
@@ -1591,7 +1597,7 @@ enable_vic_irq(unsigned int irq)
 }
 
 static void
-disable_vic_irq(unsigned int irq)
+mask_vic_irq(unsigned int irq)
 {
        /* lazy disable, do nothing */
 }
@@ -1819,7 +1825,7 @@ set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
         * disabled again as it comes in (voyager lazy disable).  If
         * the affinity map is tightened to disable the interrupt on a
         * cpu, it will be pushed off when it comes in */
-       enable_vic_irq(irq);
+       unmask_vic_irq(irq);
 }
 
 static void
index 455597db84dffe2d078db1011d21653a8568650c..ddbdb0336f28f1c6d4d303d11055a929b7daf286 100644 (file)
@@ -356,11 +356,12 @@ void __init numa_kva_reserve(void)
 void __init zone_sizes_init(void)
 {
        int nid;
-       unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-               virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT,
-               max_low_pfn,
-               highend_pfn
-       };
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+       max_zone_pfns[ZONE_DMA] =
+               virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+       max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
 
        /* If SRAT has not registered memory, register it now */
        if (find_max_pfn_with_active_regions() == 0) {
index daf977ff2920e2c88aee4c6398a7d135bc22d802..82deaa3a7c4806adecb70daacdbfc7d2997105e0 100644 (file)
@@ -233,6 +233,7 @@ paging_init (void)
        efi_memmap_walk(count_pages, &num_physpages);
 
        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = max_dma;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
index d497b6b0f5b2c08d4be5cd50478399db993a2dc8..96722cb1b49ddbc296951fa1007b2f418d910dea 100644 (file)
@@ -709,6 +709,7 @@ void __init paging_init(void)
                        max_pfn = mem_data[node].max_pfn;
        }
 
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = max_dma;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;
        free_area_init_nodes(max_zone_pfns);
index 3f35ab3d2dc28060b6030c76b37726887e8edd5f..0e7778be33ccc93503e5b28a4f902f01e13674b4 100644 (file)
@@ -369,10 +369,10 @@ static void c_stop(struct seq_file *m, void *v)
 }
 
 struct seq_operations cpuinfo_op = {
-       start:  c_start,
-       next:   c_next,
-       stop:   c_stop,
-       show:   show_cpuinfo,
+       .start = c_start,
+       .next = c_next,
+       .stop = c_stop,
+       .show = show_cpuinfo,
 };
 #endif /* CONFIG_PROC_FS */
 
index 67dbbdc9d111bdc89e0b621adf2d9ef438030915..6b2d77da06830c734c01d588881694c115e84c53 100644 (file)
@@ -86,7 +86,7 @@ void __init init_IRQ(void)
        /* INT0 : LAN controller (RTL8019AS) */
        irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_INT0].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_INT0].action = 0;
+       irq_desc[M32R_IRQ_INT0].action = NULL;
        irq_desc[M32R_IRQ_INT0].depth = 1;
        icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
        disable_mappi_irq(M32R_IRQ_INT0);
@@ -95,7 +95,7 @@ void __init init_IRQ(void)
        /* MFT2 : system timer */
        irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_MFT2].action = 0;
+       irq_desc[M32R_IRQ_MFT2].action = NULL;
        irq_desc[M32R_IRQ_MFT2].depth = 1;
        icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
        disable_mappi_irq(M32R_IRQ_MFT2);
@@ -104,7 +104,7 @@ void __init init_IRQ(void)
        /* SIO0_R : uart receive data */
        irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_SIO0_R].action = 0;
+       irq_desc[M32R_IRQ_SIO0_R].action = NULL;
        irq_desc[M32R_IRQ_SIO0_R].depth = 1;
        icu_data[M32R_IRQ_SIO0_R].icucr = 0;
        disable_mappi_irq(M32R_IRQ_SIO0_R);
@@ -112,7 +112,7 @@ void __init init_IRQ(void)
        /* SIO0_S : uart send data */
        irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_SIO0_S].action = 0;
+       irq_desc[M32R_IRQ_SIO0_S].action = NULL;
        irq_desc[M32R_IRQ_SIO0_S].depth = 1;
        icu_data[M32R_IRQ_SIO0_S].icucr = 0;
        disable_mappi_irq(M32R_IRQ_SIO0_S);
@@ -120,7 +120,7 @@ void __init init_IRQ(void)
        /* SIO1_R : uart receive data */
        irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_SIO1_R].action = 0;
+       irq_desc[M32R_IRQ_SIO1_R].action = NULL;
        irq_desc[M32R_IRQ_SIO1_R].depth = 1;
        icu_data[M32R_IRQ_SIO1_R].icucr = 0;
        disable_mappi_irq(M32R_IRQ_SIO1_R);
@@ -128,7 +128,7 @@ void __init init_IRQ(void)
        /* SIO1_S : uart send data */
        irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_SIO1_S].action = 0;
+       irq_desc[M32R_IRQ_SIO1_S].action = NULL;
        irq_desc[M32R_IRQ_SIO1_S].depth = 1;
        icu_data[M32R_IRQ_SIO1_S].icucr = 0;
        disable_mappi_irq(M32R_IRQ_SIO1_S);
@@ -138,7 +138,7 @@ void __init init_IRQ(void)
        /* INT1 : pccard0 interrupt */
        irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_INT1].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_INT1].action = 0;
+       irq_desc[M32R_IRQ_INT1].action = NULL;
        irq_desc[M32R_IRQ_INT1].depth = 1;
        icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
        disable_mappi_irq(M32R_IRQ_INT1);
@@ -146,7 +146,7 @@ void __init init_IRQ(void)
        /* INT2 : pccard1 interrupt */
        irq_desc[M32R_IRQ_INT2].status = IRQ_DISABLED;
        irq_desc[M32R_IRQ_INT2].chip = &mappi_irq_type;
-       irq_desc[M32R_IRQ_INT2].action = 0;
+       irq_desc[M32R_IRQ_INT2].action = NULL;
        irq_desc[M32R_IRQ_INT2].depth = 1;
        icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
        disable_mappi_irq(M32R_IRQ_INT2);
index a9174efe80cbf95379d3891de21f0c3b660eeb92..b60cea4aebaa6d9e663cae8eef42084d4b30fd26 100644 (file)
@@ -33,7 +33,7 @@
 int do_signal(struct pt_regs *, sigset_t *);
 
 asmlinkage int
-sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
                  unsigned long r2, unsigned long r3, unsigned long r4,
                  unsigned long r5, unsigned long r6, struct pt_regs *regs)
 {
@@ -78,8 +78,8 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 struct rt_sigframe
 {
        int sig;
-       struct siginfo *pinfo;
-       void *puc;
+       struct siginfo __user *pinfo;
+       void __user *puc;
        struct siginfo info;
        struct ucontext uc;
 //     struct _fpstate fpstate;
index 722e21f556dc53701a5c0989b77ea36c5f0ec8a7..360129174b2bd17aaeed1fcc37c91e80a8100d05 100644 (file)
@@ -231,7 +231,7 @@ void smp_flush_tlb_all(void)
        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
-       smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
+       smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
        preempt_enable();
 }
 
index b567351f3c5236d41e0b108a2ba516a86fc170fd..b4e7bcb43540444d6db2f4a56855711cc56ffe02 100644 (file)
@@ -31,7 +31,7 @@
 /*
  * sys_tas() - test-and-set
  */
-asmlinkage int sys_tas(int *addr)
+asmlinkage int sys_tas(int __user *addr)
 {
        int oldval;
 
@@ -90,7 +90,7 @@ sys_pipe(unsigned long r0, unsigned long r1, unsigned long r2,
 
        error = do_pipe(fd);
        if (!error) {
-               if (copy_to_user((void *)r0, (void *)fd, 2*sizeof(int)))
+               if (copy_to_user((void __user *)r0, fd, 2*sizeof(int)))
                        error = -EFAULT;
        }
        return error;
@@ -201,7 +201,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
        }
 }
 
-asmlinkage int sys_uname(struct old_utsname * name)
+asmlinkage int sys_uname(struct old_utsname __user * name)
 {
        int err;
        if (!name)
index c1daf2c40c7c4fb05af0f61b503fe9bf20a92bc1..97e0b1c0830e81cf218d4e61c4dbbc0a1de8e0ff 100644 (file)
@@ -268,7 +268,7 @@ static __inline__ void do_trap(int trapnr, int signr, const char * str,
 #define DO_ERROR(trapnr, signr, str, name) \
 asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
 { \
-       do_trap(trapnr, signr, 0, regs, error_code, NULL); \
+       do_trap(trapnr, signr, NULL, regs, error_code, NULL); \
 }
 
 #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
index f9636e84e6a4dac70f060a5fa62df6cfd27ab777..6fc69c74fe2eec9b8c9c646baeffd8fe22942997 100644 (file)
@@ -1,61 +1,10 @@
 #include <linux/module.h>
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-
-#include <asm/setup.h>
-#include <asm/machdep.h>
-#include <asm/pgalloc.h>
-#include <asm/irq.h>
-#include <asm/io.h>
 #include <asm/semaphore.h>
-#include <asm/checksum.h>
 
 asmlinkage long long __ashldi3 (long long, int);
 asmlinkage long long __ashrdi3 (long long, int);
 asmlinkage long long __lshrdi3 (long long, int);
 asmlinkage long long __muldi3 (long long, long long);
-extern char m68k_debug_device[];
-
-/* platform dependent support */
-
-EXPORT_SYMBOL(m68k_machtype);
-EXPORT_SYMBOL(m68k_cputype);
-EXPORT_SYMBOL(m68k_is040or060);
-EXPORT_SYMBOL(m68k_realnum_memory);
-EXPORT_SYMBOL(m68k_memory);
-#ifndef CONFIG_SUN3
-EXPORT_SYMBOL(cache_push);
-EXPORT_SYMBOL(cache_clear);
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-EXPORT_SYMBOL(mm_vtop);
-EXPORT_SYMBOL(mm_ptov);
-EXPORT_SYMBOL(mm_end_of_chunk);
-#else
-EXPORT_SYMBOL(m68k_memoffset);
-#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(kernel_set_cachemode);
-#endif /* !CONFIG_SUN3 */
-EXPORT_SYMBOL(m68k_debug_device);
-EXPORT_SYMBOL(mach_hwclk);
-EXPORT_SYMBOL(mach_get_ss);
-EXPORT_SYMBOL(mach_get_rtc_pll);
-EXPORT_SYMBOL(mach_set_rtc_pll);
-#ifdef CONFIG_INPUT_M68K_BEEP_MODULE
-EXPORT_SYMBOL(mach_beep);
-#endif
-EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(kernel_thread);
-#ifdef CONFIG_VME
-EXPORT_SYMBOL(vme_brdtype);
-#endif
 
 /* The following are special because they're not called
    explicitly (the C compiler generates them).  Fortunately,
index 45a46646c1b353eec5b565e835d99d16d8df5edc..99fc1226f7f804ed40cf875fca1118d78347e8ac 100644 (file)
@@ -187,6 +187,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
        set_fs (fs);
        return pid;
 }
+EXPORT_SYMBOL(kernel_thread);
 
 void flush_thread(void)
 {
@@ -221,13 +222,13 @@ asmlinkage int m68k_clone(struct pt_regs *regs)
 {
        unsigned long clone_flags;
        unsigned long newsp;
-       int *parent_tidptr, *child_tidptr;
+       int __user *parent_tidptr, *child_tidptr;
 
        /* syscall2 puts clone_flags in d1 and usp in d2 */
        clone_flags = regs->d1;
        newsp = regs->d2;
-       parent_tidptr = (int *)regs->d3;
-       child_tidptr = (int *)regs->d4;
+       parent_tidptr = (int __user *)regs->d3;
+       child_tidptr = (int __user *)regs->d4;
        if (!newsp)
                newsp = rdusp();
        return do_fork(clone_flags, newsp, regs, 0,
@@ -311,6 +312,7 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
                : "memory");
        return 1;
 }
+EXPORT_SYMBOL(dump_fpu);
 
 /*
  * fill in the user structure for a core dump..
@@ -357,11 +359,12 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
        /* dump floating point stuff */
        dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp);
 }
+EXPORT_SYMBOL(dump_thread);
 
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(char *name, char **argv, char **envp)
+asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
 {
        int error;
        char * filename;
index 42d5b85f33509cf71fa77caeced3cdd6ad7e7bfa..9af3ee0e555d7be8ace62282659b31831cf3642f 100644 (file)
 
 unsigned long m68k_machtype;
 unsigned long m68k_cputype;
+EXPORT_SYMBOL(m68k_machtype);
+EXPORT_SYMBOL(m68k_cputype);
 unsigned long m68k_fputype;
 unsigned long m68k_mmutype;
 #ifdef CONFIG_VME
 unsigned long vme_brdtype;
+EXPORT_SYMBOL(vme_brdtype);
 #endif
 
 int m68k_is040or060;
+EXPORT_SYMBOL(m68k_is040or060);
 
 extern int end;
 extern unsigned long availmem;
 
 int m68k_num_memory;
 int m68k_realnum_memory;
+EXPORT_SYMBOL(m68k_realnum_memory);
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
 unsigned long m68k_memoffset;
+EXPORT_SYMBOL(m68k_memoffset);
+#endif
 struct mem_info m68k_memory[NUM_MEMINFO];
+EXPORT_SYMBOL(m68k_memory);
 
 static struct mem_info m68k_ramdisk;
 
 static char m68k_command_line[CL_SIZE];
 
 char m68k_debug_device[6] = "";
+EXPORT_SYMBOL(m68k_debug_device);
 
 void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
 /* machine dependent irq functions */
@@ -72,10 +82,14 @@ int (*mach_get_hardware_list) (char *buffer);
 /* machine dependent timer functions */
 unsigned long (*mach_gettimeoffset) (void);
 int (*mach_hwclk) (int, struct rtc_time*);
+EXPORT_SYMBOL(mach_hwclk);
 int (*mach_set_clock_mmss) (unsigned long);
 unsigned int (*mach_get_ss)(void);
 int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 int (*mach_set_rtc_pll)(struct rtc_pll_info *);
+EXPORT_SYMBOL(mach_get_ss);
+EXPORT_SYMBOL(mach_get_rtc_pll);
+EXPORT_SYMBOL(mach_set_rtc_pll);
 void (*mach_reset)( void );
 void (*mach_halt)( void );
 void (*mach_power_off)( void );
@@ -89,6 +103,7 @@ void (*mach_l2_flush) (int);
 #endif
 #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
 void (*mach_beep)(unsigned int, unsigned int);
+EXPORT_SYMBOL(mach_beep);
 #endif
 #if defined(CONFIG_ISA) && defined(MULTI_ISA)
 int isa_type;
index 4569406a2e1f8d8855e8d9ae57ebd698c7ad9b1f..759fa244e6cd60cc5e226c486525e64b43aae190 100644 (file)
@@ -326,13 +326,13 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
 
        switch (wbs & WBSIZ_040) {
        case BA_SIZE_BYTE:
-               res = put_user(wbd & 0xff, (char *)wba);
+               res = put_user(wbd & 0xff, (char __user *)wba);
                break;
        case BA_SIZE_WORD:
-               res = put_user(wbd & 0xffff, (short *)wba);
+               res = put_user(wbd & 0xffff, (short __user *)wba);
                break;
        case BA_SIZE_LONG:
-               res = put_user(wbd, (int *)wba);
+               res = put_user(wbd, (int __user *)wba);
                break;
        }
 
index f46f049d29ff34542acf24257a64809f728f9c26..b54ef1726c557f827665ae3055eb2c1f8868c2c7 100644 (file)
@@ -7,6 +7,7 @@
  *          used by other architectures                /Roman Zippel
  */
 
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
@@ -219,6 +220,7 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
 
        return (void __iomem *)retaddr;
 }
+EXPORT_SYMBOL(__ioremap);
 
 /*
  * Unmap an ioremap()ed region again
@@ -234,6 +236,7 @@ void iounmap(void __iomem *addr)
        free_io_area((__force void *)addr);
 #endif
 }
+EXPORT_SYMBOL(iounmap);
 
 /*
  * __iounmap unmaps nearly everything, so be careful
@@ -360,3 +363,4 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
 
        flush_tlb_all();
 }
+EXPORT_SYMBOL(kernel_set_cachemode);
index a0c095e17222df5c48c56279d7181d11b4e4e000..0f88812822b1315f83585600bef314921edab9da 100644 (file)
@@ -4,6 +4,7 @@
  *  Copyright (C) 1995  Hamish Macdonald
  */
 
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
@@ -157,9 +158,8 @@ unsigned long mm_vtop(unsigned long vaddr)
 
        return -1;
 }
-#endif
+EXPORT_SYMBOL(mm_vtop);
 
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
 unsigned long mm_ptov (unsigned long paddr)
 {
        int i = 0;
@@ -185,6 +185,7 @@ unsigned long mm_ptov (unsigned long paddr)
 #endif
        return -1;
 }
+EXPORT_SYMBOL(mm_ptov);
 #endif
 
 /* invalidate page in both caches */
@@ -298,6 +299,7 @@ void cache_clear (unsigned long paddr, int len)
        mach_l2_flush(0);
 #endif
 }
+EXPORT_SYMBOL(cache_clear);    /* probably can be unexported */
 
 
 /*
@@ -350,6 +352,7 @@ void cache_push (unsigned long paddr, int len)
        mach_l2_flush(1);
 #endif
 }
+EXPORT_SYMBOL(cache_push);     /* probably can be unexported */
 
 #ifndef CONFIG_SINGLE_MEMORY_CHUNK
 int mm_end_of_chunk (unsigned long addr, int len)
@@ -361,4 +364,5 @@ int mm_end_of_chunk (unsigned long addr, int len)
                        return 1;
        return 0;
 }
+EXPORT_SYMBOL(mm_end_of_chunk);
 #endif
index 7f0d86f3fe73a245e7773bb4370684881686b554..1af24cb5bfe136b4f8e18337ad72d8609bc4e755 100644 (file)
@@ -8,6 +8,7 @@
  * for more details.
  */
 
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -59,7 +60,7 @@ static inline void do_pmeg_mapin(unsigned long phys, unsigned long virt,
        }
 }
 
-void *sun3_ioremap(unsigned long phys, unsigned long size,
+void __iomem *sun3_ioremap(unsigned long phys, unsigned long size,
                   unsigned long type)
 {
        struct vm_struct *area;
@@ -101,22 +102,24 @@ void *sun3_ioremap(unsigned long phys, unsigned long size,
                virt += seg_pages * PAGE_SIZE;
        }
 
-       return (void *)ret;
+       return (void __iomem *)ret;
 
 }
 
 
-void *__ioremap(unsigned long phys, unsigned long size, int cache)
+void __iomem *__ioremap(unsigned long phys, unsigned long size, int cache)
 {
 
        return sun3_ioremap(phys, size, SUN3_PAGE_TYPE_IO);
 
 }
+EXPORT_SYMBOL(__ioremap);
 
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
        vfree((void *)(PAGE_MASK & (unsigned long)addr));
 }
+EXPORT_SYMBOL(iounmap);
 
 /* sun3_map_test(addr, val) -- Reads a byte from addr, storing to val,
  * trapping the potential read fault.  Returns 0 if the access faulted,
index 4d4f0695d985bb92c60e053131821d0e32d7ccf4..be1a8470d63685b4742dbe1d913ffcf8ecdffbfd 100644 (file)
@@ -2,6 +2,6 @@
 # Makefile for Linux arch/m68k/sun3 source directory
 #
 
-obj-y  := sun3_ksyms.o sun3ints.o sun3dvma.o sbus.o idprom.o
+obj-y  := sun3ints.o sun3dvma.o sbus.o idprom.o
 
 obj-$(CONFIG_SUN3) += config.o mmu_emu.o leds.o dvma.o intersil.o
index 02c1fee6fe744a359b84f3f0967d836d46b78061..dca6ab6a4ede9ac13c31a46d3a7e1bae46a576a7 100644 (file)
@@ -6,6 +6,7 @@
  * Sun3/3x models added by David Monro (davidm@psrg.cs.usyd.edu.au)
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -16,6 +17,8 @@
 #include <asm/machines.h>  /* Fun with Sun released architectures. */
 
 struct idprom *idprom;
+EXPORT_SYMBOL(idprom);
+
 static struct idprom idprom_buffer;
 
 /* Here is the master table of Sun machines which use some implementation
diff --git a/arch/m68k/sun3/sun3_ksyms.c b/arch/m68k/sun3/sun3_ksyms.c
deleted file mode 100644 (file)
index 43e5a9a..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/dvma.h>
-#include <asm/idprom.h>
-
-/*
- * Add things here when you find the need for it.
- */
-EXPORT_SYMBOL(dvma_map_align);
-EXPORT_SYMBOL(dvma_unmap);
-EXPORT_SYMBOL(dvma_malloc_align);
-EXPORT_SYMBOL(dvma_free);
-EXPORT_SYMBOL(idprom);
index a2bc2da7f8f0ceba005c7a43a8344d59da4cb78c..8709677fa0255aa03dbe512e8e558c224f546f81 100644 (file)
@@ -6,6 +6,7 @@
  * Contains common routines for sun3/sun3x DVMA management.
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/list.h>
@@ -312,6 +313,7 @@ inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
        BUG();
        return 0;
 }
+EXPORT_SYMBOL(dvma_map_align);
 
 void dvma_unmap(void *baddr)
 {
@@ -327,7 +329,7 @@ void dvma_unmap(void *baddr)
        return;
 
 }
-
+EXPORT_SYMBOL(dvma_unmap);
 
 void *dvma_malloc_align(unsigned long len, unsigned long align)
 {
@@ -367,6 +369,7 @@ void *dvma_malloc_align(unsigned long len, unsigned long align)
        return (void *)vaddr;
 
 }
+EXPORT_SYMBOL(dvma_malloc_align);
 
 void dvma_free(void *vaddr)
 {
@@ -374,3 +377,4 @@ void dvma_free(void *vaddr)
        return;
 
 }
+EXPORT_SYMBOL(dvma_free);
index 617e43ec95ae48eb6e89dd44f64df34392dcabe3..4603f4f3c935f282b88d0f487775fd9112ba4ee6 100644 (file)
@@ -296,10 +296,39 @@ ENTRY(sys_call_table)
        .long sys_mq_notify     /* 275 */
        .long sys_mq_getsetattr
        .long sys_waitid
-       .long sys_ni_syscall    /* sys_setaltroot */
-       .long sys_ni_syscall    /* sys_add_key */
-       .long sys_ni_syscall    /* 280 */ /* sys_request_key */
-       .long sys_ni_syscall    /* sys_keyctl */
+       .long sys_ni_syscall    /* for sys_vserver */
+       .long sys_add_key
+       .long sys_request_key   /* 280 */
+       .long sys_keyctl
+       .long sys_ioprio_set
+       .long sys_ioprio_get
+       .long sys_inotify_init
+       .long sys_inotify_add_watch     /* 285 */
+       .long sys_inotify_rm_watch
+       .long sys_migrate_pages
+       .long sys_openat
+       .long sys_mkdirat
+       .long sys_mknodat               /* 290 */
+       .long sys_fchownat
+       .long sys_futimesat
+       .long sys_fstatat64
+       .long sys_unlinkat
+       .long sys_renameat              /* 295 */
+       .long sys_linkat
+       .long sys_symlinkat
+       .long sys_readlinkat
+       .long sys_fchmodat
+       .long sys_faccessat             /* 300 */
+       .long sys_ni_syscall            /* Reserved for pselect6 */
+       .long sys_ni_syscall            /* Reserved for ppoll */
+       .long sys_unshare
+       .long sys_set_robust_list
+       .long sys_get_robust_list       /* 305 */
+       .long sys_splice
+       .long sys_sync_file_range
+       .long sys_tee
+       .long sys_vmsplice
+       .long sys_move_pages            /* 310 */
 
        .rept NR_syscalls-(.-sys_call_table)/4
                .long sys_ni_syscall
index 2124350ab94dfa88e609e7fe193f09d45c6c5bf7..641aa30b36385a6860f304709d82b5ab7871345a 100644 (file)
@@ -91,8 +91,17 @@ cflags-y += -ffreestanding
 # carefully avoid to add it redundantly because gcc 3.3/3.4 complains
 # when fed the toolchain default!
 #
-cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB -D__MIPSEB__)
-cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL -D__MIPSEL__)
+# Certain gcc versions up to gcc 4.1.1 (probably 4.2-subversion as of
+# 2006-10-10) don't properly change the predefined symbols if -EB / -EL
+# are used, so we kludge that here.  A bug has been filed at
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=29413.
+#
+undef-all += -UMIPSEB -U_MIPSEB -U__MIPSEB -U__MIPSEB__
+undef-all += -UMIPSEL -U_MIPSEL -U__MIPSEL -U__MIPSEL__
+predef-be += -DMIPSEB -D_MIPSEB -D__MIPSEB -D__MIPSEB__
+predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
+cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
+cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
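
The kludge matters because shared code selects its byte-order handling
purely from these predefines; a toolchain that leaves them at the
configured default while -EB/-EL flips the actual output would compile
the wrong branch everywhere. A trivial illustration:

        /* illustrative only: typical use of the predefined symbol */
        #ifdef __MIPSEB__
        # define PLATFORM_BIG_ENDIAN    1
        #else
        # define PLATFORM_BIG_ENDIAN    0
        #endif

Undefining every variant first and re-defining the full set keeps the
compiler's view consistent with the -EB/-EL flag actually passed.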
index c6a015940b410d2929d1af85d6278f413bed8486..ba3bf733d27d5d9621c235bdf38413146e548899 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18-rc1
-# Thu Jul  6 10:02:58 2006
+# Linux kernel version: 2.6.19-rc1
+# Wed Oct 11 01:41:41 2006
 #
 CONFIG_MIPS=y
 
@@ -25,8 +25,6 @@ CONFIG_MIPS=y
 # CONFIG_MIPS_COBALT is not set
 # CONFIG_MACH_DECSTATION is not set
 # CONFIG_MIPS_EV64120 is not set
-# CONFIG_MIPS_IVR is not set
-# CONFIG_MIPS_ITE8172 is not set
 # CONFIG_MACH_JAZZ is not set
 # CONFIG_LASAT is not set
 # CONFIG_MIPS_ATLAS is not set
@@ -83,6 +81,7 @@ CONFIG_RWSEM_GENERIC_SPINLOCK=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME=y
 CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
 CONFIG_DMA_COHERENT=y
 CONFIG_CPU_BIG_ENDIAN=y
@@ -132,8 +131,8 @@ CONFIG_PAGE_SIZE_4KB=y
 # CONFIG_PAGE_SIZE_64KB is not set
 # CONFIG_SIBYTE_DMA_PAGEOPS is not set
 CONFIG_MIPS_MT_DISABLED=y
-# CONFIG_MIPS_MT_SMTC is not set
 # CONFIG_MIPS_MT_SMP is not set
+# CONFIG_MIPS_MT_SMTC is not set
 # CONFIG_MIPS_VPE_LOADER is not set
 CONFIG_CPU_HAS_LLSC=y
 CONFIG_CPU_HAS_SYNC=y
@@ -185,9 +184,11 @@ CONFIG_LOCALVERSION=""
 CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
 # CONFIG_POSIX_MQUEUE is not set
 # CONFIG_BSD_PROCESS_ACCT is not set
-CONFIG_SYSCTL=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
 # CONFIG_AUDIT is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
@@ -195,7 +196,9 @@ CONFIG_IKCONFIG_PROC=y
 # CONFIG_RELAY is not set
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
 CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -204,12 +207,12 @@ CONFIG_PRINTK=y
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
-CONFIG_RT_MUTEXES=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
 CONFIG_SLAB=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -228,6 +231,7 @@ CONFIG_STOP_MACHINE=y
 #
 # Block layer
 #
+CONFIG_BLOCK=y
 # CONFIG_BLK_DEV_IO_TRACE is not set
 
 #
@@ -249,18 +253,17 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
 CONFIG_HW_HAS_PCI=y
 CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_MULTITHREAD_PROBE is not set
 CONFIG_PCI_DEBUG=y
 CONFIG_MMU=y
 
 #
 # PCCARD (PCMCIA/CardBus) support
 #
-# CONFIG_PCCARD is not set
 
 #
 # PCI Hotplug Support
 #
-# CONFIG_HOTPLUG_PCI is not set
 
 #
 # Executable file formats
@@ -271,7 +274,7 @@ CONFIG_BINFMT_ELF=y
 CONFIG_MIPS32_COMPAT=y
 CONFIG_COMPAT=y
 CONFIG_MIPS32_O32=y
-# CONFIG_MIPS32_N32 is not set
+CONFIG_MIPS32_N32=y
 CONFIG_BINFMT_ELF32=y
 
 #
@@ -288,6 +291,7 @@ CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
@@ -308,10 +312,12 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_TUNNEL is not set
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=y
 CONFIG_INET_DIAG=y
 CONFIG_INET_TCP_DIAG=y
 # CONFIG_TCP_CONG_ADVANCED is not set
-CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_IPV6 is not set
 # CONFIG_INET6_XFRM_TUNNEL is not set
 # CONFIG_INET6_TUNNEL is not set
@@ -341,7 +347,6 @@ CONFIG_NETWORK_SECMARK=y
 # CONFIG_ATALK is not set
 # CONFIG_X25 is not set
 # CONFIG_LAPB is not set
-# CONFIG_NET_DIVERT is not set
 # CONFIG_ECONET is not set
 # CONFIG_WAN_ROUTER is not set
 
@@ -368,7 +373,6 @@ CONFIG_NETWORK_SECMARK=y
 #
 CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
 # CONFIG_DEBUG_DRIVER is not set
 # CONFIG_SYS_HYPERVISOR is not set
 
@@ -404,7 +408,7 @@ CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_NBD=m
 # CONFIG_BLK_DEV_SX8 is not set
 # CONFIG_BLK_DEV_RAM is not set
-# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
 
@@ -412,6 +416,7 @@ CONFIG_BLK_DEV_NBD=m
 # ATA/ATAPI/MFM/RLL support
 #
 CONFIG_IDE=y
+CONFIG_IDE_MAX_HWIFS=4
 CONFIG_BLK_DEV_IDE=y
 
 #
@@ -429,10 +434,40 @@ CONFIG_BLK_DEV_IDEFLOPPY=y
 # IDE chipset support/bugfixes
 #
 CONFIG_IDE_GENERIC=y
-# CONFIG_BLK_DEV_IDEPCI is not set
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+# CONFIG_IDEDMA_PCI_AUTO is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+CONFIG_BLK_DEV_CMD64X=y
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
 # CONFIG_BLK_DEV_IDE_SWARM is not set
 # CONFIG_IDE_ARM is not set
-# CONFIG_BLK_DEV_IDEDMA is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
 # CONFIG_IDEDMA_AUTO is not set
 # CONFIG_BLK_DEV_HD is not set
 
@@ -441,6 +476,12 @@ CONFIG_IDE_GENERIC=y
 #
 # CONFIG_RAID_ATTRS is not set
 # CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
 
 #
 # Multi-device support (RAID and LVM)
@@ -516,6 +557,7 @@ CONFIG_NET_SB1250_MAC=y
 # CONFIG_SK98LIN is not set
 # CONFIG_TIGON3 is not set
 # CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
 
 #
 # Ethernet (10000 Mbit)
@@ -650,7 +692,6 @@ CONFIG_I2C_CHARDEV=y
 # CONFIG_I2C_ALGOBIT is not set
 # CONFIG_I2C_ALGOPCF is not set
 # CONFIG_I2C_ALGOPCA is not set
-CONFIG_I2C_ALGO_SIBYTE=y
 
 #
 # I2C Hardware Bus support
@@ -712,12 +753,12 @@ CONFIG_I2C_DEBUG_CHIP=y
 #
 # Misc devices
 #
+# CONFIG_TIFM_CORE is not set
 
 #
 # Multimedia devices
 #
 # CONFIG_VIDEO_DEV is not set
-CONFIG_VIDEO_V4L2=y
 
 #
 # Digital Video Broadcasting Devices
@@ -729,6 +770,7 @@ CONFIG_VIDEO_V4L2=y
 #
 # CONFIG_FIRMWARE_EDID is not set
 # CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
 # Sound
@@ -811,6 +853,7 @@ CONFIG_FS_MBCACHE=y
 # CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
 # CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_MINIX_FS is not set
 # CONFIG_ROMFS_FS is not set
@@ -840,8 +883,10 @@ CONFIG_DNOTIFY=y
 #
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_RAMFS=y
 # CONFIG_CONFIGFS_FS is not set
@@ -851,6 +896,7 @@ CONFIG_RAMFS=y
 #
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
 # CONFIG_HFS_FS is not set
 # CONFIG_HFSPLUS_FS is not set
 # CONFIG_BEFS_FS is not set
@@ -881,7 +927,6 @@ CONFIG_SUNRPC=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
 # CONFIG_CIFS is not set
-# CONFIG_CIFS_DEBUG2 is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 # CONFIG_AFS_FS is not set
@@ -898,6 +943,10 @@ CONFIG_MSDOS_PARTITION=y
 #
 # CONFIG_NLS is not set
 
+#
+# Distributed Lock Manager
+#
+
 #
 # Profiling support
 #
@@ -907,7 +956,8 @@ CONFIG_MSDOS_PARTITION=y
 # Kernel hacking
 #
 CONFIG_TRACE_IRQFLAGS_SUPPORT=y
-CONFIG_PRINTK_TIME=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
@@ -920,12 +970,15 @@ CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_DEBUG_SPINLOCK is not set
 CONFIG_DEBUG_MUTEXES=y
 # CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_FS is not set
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_LIST is not set
 CONFIG_FORCED_INLINING=y
 # CONFIG_RCU_TORTURE_TEST is not set
 CONFIG_CROSSCOMPILE=y
@@ -946,6 +999,10 @@ CONFIG_KEYS_DEBUG_PROC_KEYS=y
 # Cryptographic options
 #
 CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=m
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_MANAGER=m
 CONFIG_CRYPTO_HMAC=y
 CONFIG_CRYPTO_NULL=y
 CONFIG_CRYPTO_MD4=y
@@ -955,9 +1012,12 @@ CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_CBC=m
 CONFIG_CRYPTO_DES=y
 CONFIG_CRYPTO_BLOWFISH=y
 CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
 CONFIG_CRYPTO_SERPENT=y
 CONFIG_CRYPTO_AES=m
 # CONFIG_CRYPTO_CAST5 is not set
index 6dc4135d6e11e2c35eda255b8a1fe95b0b5bc6fa..d848f1a07786bf6c1de0f538e1e61d7a5494c122 100644 (file)
@@ -37,7 +37,7 @@ extern void jazz_machine_restart(char *command);
 extern void jazz_machine_halt(void);
 extern void jazz_machine_power_off(void);
 
-void __init plat_time_init(struct irqaction *irq)
+void __init plat_timer_setup(struct irqaction *irq)
 {
        /* set the clock to 100 Hz */
        r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
index 1af3612a1ce862f0b43e777935d59d9a6990c1af..db80957ada8957631070f40bb10f847cd01eb26e 100644 (file)
@@ -310,7 +310,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-       on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+       on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
index d777b7d1a9fec6c30e3058208887cc12ffbf1ff7..f9f404a8ddad625348975d4678ee4183b21de102 100644 (file)
@@ -26,7 +26,7 @@ static cpumask_t ktext_repmask;
  * kernel.  For example, we should never put a copy on a headless node,
  * and we should respect the topology of the machine.
  */
-void __init setup_replication_mask()
+void __init setup_replication_mask(void)
 {
        cnodeid_t       cnode;
 
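
For background on the one-word fix above: in C, an empty parameter list in
a definition is an old-style declaration with unspecified parameters,
while (void) is a real prototype taking no arguments, so mismatched
callers are caught at compile time.  A standalone illustration (function
names are made up):

    void f();       /* old style: arguments are not checked           */
    void g(void);   /* prototype: g(42) is rejected by the compiler   */

    void demo(void)
    {
            f(42);  /* compiles, but the call is undefined behaviour  */
            g();    /* checked against the prototype                  */
    }
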
index 6eac36d1b8c893fcb1c56d16d1ba0808ce19e90d..bf328277c775e12f649ebeeeeb4af7f10710e493 100644 (file)
@@ -34,21 +34,21 @@ extern void smp_call_function_interrupt(void);
  * independent of board/firmware
  */
 
-static void *mailbox_0_set_regs[] = {
+static volatile void *mailbox_0_set_regs[] = {
        IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
        IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
        IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
        IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_SET_CPU),
 };
 
-static void *mailbox_0_clear_regs[] = {
+static volatile void *mailbox_0_clear_regs[] = {
        IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
        IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
        IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
        IOADDR(A_BCM1480_IMR_CPU3_BASE + R_BCM1480_IMR_MAILBOX_0_CLR_CPU),
 };
 
-static void *mailbox_0_regs[] = {
+static volatile void *mailbox_0_regs[] = {
        IOADDR(A_BCM1480_IMR_CPU0_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
        IOADDR(A_BCM1480_IMR_CPU1_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
        IOADDR(A_BCM1480_IMR_CPU2_BASE + R_BCM1480_IMR_MAILBOX_0_CPU),
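
A note on the qualifier being added in this hunk: "volatile void *"
declares a pointer to volatile data, the appropriate shape for
memory-mapped registers; it is not the same thing as a volatile pointer.
A two-line contrast (types are illustrative):

    volatile u32 *reg;          /* pointer to a volatile device register */
    u32 * volatile cursor;      /* volatile pointer to ordinary data     */
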
index 6d57553d8ef886890dc358247f85adc7fdb52e2d..8f6a0b312f7a025b97d9785be851f0fb748f4e85 100644 (file)
@@ -69,10 +69,6 @@ EXPORT_SYMBOL(memcpy_toio);
 EXPORT_SYMBOL(memcpy_fromio);
 EXPORT_SYMBOL(memset_io);
 
-#include <asm/unistd.h>
-EXPORT_SYMBOL(sys_lseek);
-EXPORT_SYMBOL(sys_write);
-
 #include <asm/semaphore.h>
 EXPORT_SYMBOL(__up);
 EXPORT_SYMBOL(__down_interruptible);
index cd3535e1a09558d323b351fd52b87c7c2831c692..0561b73a918f5c3545f7850e40f1fed7dbea2795 100644 (file)
@@ -1248,7 +1248,7 @@ CONFIG_PARTITION_ADVANCED=y
 # CONFIG_AMIGA_PARTITION is not set
 # CONFIG_ATARI_PARTITION is not set
 # CONFIG_MAC_PARTITION is not set
-# CONFIG_MSDOS_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
 # CONFIG_LDM_PARTITION is not set
 # CONFIG_SGI_PARTITION is not set
 # CONFIG_ULTRIX_PARTITION is not set
index 47a613cdd775ce11535dbc214886bb446f9437f3..95382f99440475b097420a322031b5293858492a 100644 (file)
@@ -268,7 +268,7 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER_POWER6,
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
-               .num_pmcs               = 8,
+               .num_pmcs               = 6,
                .oprofile_cpu_type      = "ppc64/power6",
                .oprofile_type          = PPC_OPROFILE_POWER4,
                .oprofile_mmcra_sihv    = POWER6_MMCRA_SIHV,
index 9b49f8691d29d8a28451e8b4d6d6e858c7cb693f..0d9ff72e28526a7d2f2990388354a5fd37be13ef 100644 (file)
@@ -441,14 +441,14 @@ update_bridge_base(struct pci_bus *bus, int i)
                end = res->end - off;
                io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
                io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
-               if (end > 0xffff) {
-                       pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
-                                             start >> 16);
-                       pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
-                                             end >> 16);
+               if (end > 0xffff)
                        io_base_lo |= PCI_IO_RANGE_TYPE_32;
-               else
+               else
                        io_base_lo |= PCI_IO_RANGE_TYPE_16;
+               pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
+                               start >> 16);
+               pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
+                               end >> 16);
                pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
                pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
 
index 78d3c0fc8dfbfd1254f77fa44d680c4cdcc27b56..9bae8a5bf671344a6c5dce261272a6159a64a181 100644 (file)
@@ -199,8 +199,14 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
        pci_setup_pci_controller(phb);
        phb->arch_data = dev;
        phb->is_dynamic = mem_init_done;
-       if (dev)
-               PHB_SET_NODE(phb, of_node_to_nid(dev));
+       if (dev) {
+               int nid = of_node_to_nid(dev);
+
+               if (nid < 0 || !node_online(nid))
+                       nid = -1;
+
+               PHB_SET_NODE(phb, nid);
+       }
        return phb;
 }
 
index 7b2f6452ba7252caa11c377553392446d44c02e0..f3d4dd580dd69fe20a04dd4762866ad0f82d51ea 100644 (file)
@@ -341,13 +341,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 static int instructions_to_print = 16;
 
-#ifdef CONFIG_PPC64
-#define BAD_PC(pc)     ((REGION_ID(pc) != KERNEL_REGION_ID) && \
-                        (REGION_ID(pc) != VMALLOC_REGION_ID))
-#else
-#define BAD_PC(pc)     ((pc) < KERNELBASE)
-#endif
-
 static void show_instructions(struct pt_regs *regs)
 {
        int i;
@@ -366,7 +359,8 @@ static void show_instructions(struct pt_regs *regs)
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
-               if (BAD_PC(pc) || __get_user(instr, (unsigned int __user *)pc)) {
+               if (!__kernel_text_address(pc) ||
+                    __get_user(instr, (unsigned int __user *)pc)) {
                        printk("XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
index d9f10f2fc372b2ebde2bb24dbbb82267383295d7..5ed4c2ceb5caa8632c11f229193afe0596893650 100644 (file)
@@ -900,14 +900,13 @@ void kernel_fp_unavailable_exception(struct pt_regs *regs)
 
 void altivec_unavailable_exception(struct pt_regs *regs)
 {
-#if !defined(CONFIG_ALTIVEC)
        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
                   but this kernel doesn't support altivec. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }
-#endif
+
        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
                        "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
index 16fe027bbc12ffb873222dbbdc170d474028ca91..d1c0758c56110628e5cc691263f48d1eda36bbb9 100644 (file)
@@ -307,11 +307,12 @@ void __init paging_init(void)
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_HIGHMEM
-       max_zone_pfns[0] = total_lowmem >> PAGE_SHIFT;
-       max_zone_pfns[1] = top_of_ram >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
 #else
-       max_zone_pfns[0] = top_of_ram >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
 #endif
        free_area_init_nodes(max_zone_pfns);
 }
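
The same conversion recurs in the other paging_init() implementations
below: zero the whole max_zone_pfns[] first, then fill in only the zones
that exist, addressed by the named ZONE_* indices rather than bare 0/1.
For the single-zone case, a C99 designated initializer gives the same
zeroing guarantee in one step (illustrative sketch, not part of the
patch):

    unsigned long max_zone_pfns[MAX_NR_ZONES] = {
            [ZONE_DMA] = top_of_ram >> PAGE_SHIFT,  /* rest default to 0 */
    };
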
index 43c272075e1ae8408baa35c8e78a2d831e24312c..9da01dc8cfd9d3fc0722de803d7cfbfb9e0ab573 100644 (file)
@@ -617,9 +617,9 @@ void __init do_init_bootmem(void)
 
 void __init paging_init(void)
 {
-       unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-                               lmb_end_of_DRAM() >> PAGE_SHIFT
-       };
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+       max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
 }
 
index 0975e94ac7c469bc416b0f9fab57de41e4ff0a52..7edb6b461382b407203b5c1929ae0fd7e4ba2487 100644 (file)
@@ -32,6 +32,13 @@ config MPC834x_ITX
          Be aware that PCI initialization is the bootloader's
          responsibility.
 
+config MPC8360E_PB
+       bool "Freescale MPC8360E PB"
+       select DEFAULT_UIMAGE
+       select QUICC_ENGINE
+       help
+         This option enables support for the MPC836x EMDS Processor Board.
+
 endchoice
 
 config PPC_MPC832x
@@ -46,4 +53,10 @@ config MPC834x
        select PPC_INDIRECT_PCI
        default y if MPC834x_SYS || MPC834x_ITX
 
+config PPC_MPC836x
+       bool
+       select PPC_UDBG_16550
+       select PPC_INDIRECT_PCI
+       default y if MPC8360E_PB
+
 endmenu
index 9387a110d28aa4f410b9f226648c9277e0ca9b07..f1aa7e24a9382de1f18153dca7ce5a4da2c305fe 100644 (file)
@@ -5,3 +5,5 @@ obj-y                           := misc.o
 obj-$(CONFIG_PCI)              += pci.o
 obj-$(CONFIG_MPC834x_SYS)      += mpc834x_sys.o
 obj-$(CONFIG_MPC834x_ITX)      += mpc834x_itx.o
+obj-$(CONFIG_MPC8360E_PB)      += mpc8360e_pb.o
+obj-$(CONFIG_MPC832x_MDS)      += mpc832x_mds.o
index c0191900fc251b974cac3a2b7735768d915a8caa..1a523c81c06e528de4ac67bfe05d384ff054d6f5 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/root_dev.h>
 #include <linux/initrd.h>
 
+#include <asm/of_device.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
 #include <asm/time.h>
@@ -141,6 +142,24 @@ static void __init mpc8360_sys_setup_arch(void)
 #endif
 }
 
+static int __init mpc8360_declare_of_platform_devices(void)
+{
+       struct device_node *np;
+
+       for (np = NULL; (np = of_find_compatible_node(np, "network",
+                                       "ucc_geth")) != NULL;) {
+               int ucc_num;
+               char bus_id[BUS_ID_SIZE];
+
+               ucc_num = *((uint *) get_property(np, "device-id", NULL)) - 1;
+               snprintf(bus_id, BUS_ID_SIZE, "ucc_geth.%u", ucc_num);
+               of_platform_device_create(np, bus_id, NULL);
+       }
+
+       return 0;
+}
+device_initcall(mpc8360_declare_of_platform_devices);
+
 void __init mpc8360_sys_init_IRQ(void)
 {
 
index ccfd0c4db87441398bbd9519f5780dcfc3354d76..d0fb959e3ef113b117d4972c373067baab96aa29 100644 (file)
@@ -781,6 +781,17 @@ static int __init create_spu(struct device_node *spe)
        if (!spu)
                goto out;
 
+       spu->node = find_spu_node_id(spe);
+       if (spu->node >= MAX_NUMNODES) {
+               printk(KERN_WARNING "SPE %s on node %d ignored,"
+                      " node number too big\n", spe->full_name, spu->node);
+               printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
+               return -ENODEV;
+       }
+       spu->nid = of_node_to_nid(spe);
+       if (spu->nid == -1)
+               spu->nid = 0;
+
        ret = spu_map_device(spu, spe);
        /* try old method */
        if (ret)
@@ -788,10 +799,6 @@ static int __init create_spu(struct device_node *spe)
        if (ret)
                goto out_free;
 
-       spu->node = find_spu_node_id(spe);
-       spu->nid = of_node_to_nid(spe);
-       if (spu->nid == -1)
-               spu->nid = 0;
        ret = spu_map_interrupts(spu, spe);
        if (ret)
                ret = spu_map_interrupts_old(spu, spe);
index e0d730045260aeb00e399024485af2197fb39bd2..0de8e114e6b68355b26c2f2ef4de546367792dbc 100644 (file)
@@ -246,6 +246,7 @@ static int spufs_cntl_open(struct inode *inode, struct file *file)
 
 static struct file_operations spufs_cntl_fops = {
        .open = spufs_cntl_open,
+       .release = simple_attr_close,
        .read = simple_attr_read,
        .write = simple_attr_write,
        .mmap = spufs_cntl_mmap,
index d4b2cf74da6aa177ecb6d598f29571b53e89817c..18ee851e33e387675ae6d9e7ab5fb18091732247 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/cache.h>
 #include <asm/8xx_immap.h>
 #include <asm/machdep.h>
+#include <asm/irq_regs.h>
 
 #include <asm/time.h>
 
@@ -129,6 +130,7 @@ void wakeup_decrementer(void)
  */
 void timer_interrupt(struct pt_regs * regs)
 {
+       struct pt_regs *old_regs;
        int next_dec;
        unsigned long cpu = smp_processor_id();
        unsigned jiffy_stamp = last_jiffy_stamp(cpu);
@@ -137,6 +139,7 @@ void timer_interrupt(struct pt_regs * regs)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
 
+       old_regs = set_irq_regs(regs);
        irq_enter();
 
        while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
@@ -188,6 +191,7 @@ void timer_interrupt(struct pt_regs * regs)
                ppc_md.heartbeat();
 
        irq_exit();
+       set_irq_regs(old_regs);
 }
 
 /*
index 410200046af120236e44fb35d656e57f65ceb0da..c374e53ae03a0971654d26b5670f0e670b65cf40 100644 (file)
@@ -374,11 +374,12 @@ void __init paging_init(void)
        end_pfn = start_pfn + (total_memory >> PAGE_SHIFT);
        add_active_range(0, start_pfn, end_pfn);
 
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_HIGHMEM
-       max_zone_pfns[0] = total_lowmem >> PAGE_SHIFT;
-       max_zone_pfns[1] = total_memory >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_HIGHMEM] = total_memory >> PAGE_SHIFT;
 #else
-       max_zone_pfns[0] = total_memory >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = total_memory >> PAGE_SHIFT;
 #endif /* CONFIG_HIGHMEM */
        free_area_init_nodes(max_zone_pfns);
 }
index d7b3a6afa78f86dc4209f562f9ebdedb75da34b5..1f9ea36837b1aff30544607bbdd69bf4bdb1769a 100644 (file)
@@ -196,7 +196,7 @@ static void __init mpc8272ads_fixup_enet_pdata(struct platform_device *pdev,
        bd_t* bi = (void*)__res;
        int fs_no = fsid_fcc1+pdev->id-1;
 
-       if(fs_no > ARRAY_SIZE(mpc82xx_enet_pdata)) {
+       if(fs_no >= ARRAY_SIZE(mpc82xx_enet_pdata)) {
                return;
        }
 
@@ -222,7 +222,7 @@ static void mpc8272ads_fixup_uart_pdata(struct platform_device *pdev,
        int id = fs_uart_id_scc2fsid(idx);
 
        /* no need to alter anything if console */
-       if ((id <= num) && (!pdev->dev.platform_data)) {
+       if ((id < num) && (!pdev->dev.platform_data)) {
                pinfo = &mpc8272_uart_pdata[id];
                pinfo->uart_clk = bd->bi_intfreq;
                pdev->dev.platform_data = pinfo;
index 5f130dca377009cbe3d13d8294f9ce9ee62b025b..e95d2c1117476d35d09dcb0f699192b697353727 100644 (file)
@@ -259,7 +259,7 @@ static void mpc866ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no)
        /* Get pointer to Communication Processor */
        cp = cpmp;
 
-       if(fs_no > ARRAY_SIZE(mpc8xx_enet_pdata)) {
+       if(fs_no >= ARRAY_SIZE(mpc8xx_enet_pdata)) {
                printk(KERN_ERR"No network-suitable #%d device on bus", fs_no);
                return;
        }
@@ -305,7 +305,7 @@ static void __init mpc866ads_fixup_uart_pdata(struct platform_device *pdev,
        int id = fs_uart_id_smc2fsid(idx);
 
        /* no need to alter anything if console */
-       if ((id <= num) && (!pdev->dev.platform_data)) {
+       if ((id < num) && (!pdev->dev.platform_data)) {
                pinfo = &mpc866_uart_pdata[id];
                pinfo->uart_clk = bd->bi_intfreq;
                pdev->dev.platform_data = pinfo;
index 02293141efb5871c26f788f3a57a04cb6240cf46..f8161f3557f5df414b79ff046b7d927f99fd03a8 100644 (file)
@@ -263,7 +263,7 @@ static void mpc885ads_fixup_enet_pdata(struct platform_device *pdev, int fs_no)
        char *e;
        int i;
 
-       if(fs_no > ARRAY_SIZE(mpc8xx_enet_pdata)) {
+       if(fs_no >= ARRAY_SIZE(mpc8xx_enet_pdata)) {
                printk(KERN_ERR"No network-suitable #%d device on bus", fs_no);
                return;
        }
@@ -371,7 +371,7 @@ static void __init mpc885ads_fixup_uart_pdata(struct platform_device *pdev,
        int id = fs_uart_id_smc2fsid(idx);
 
        /* no need to alter anything if console */
-       if ((id <= num) && (!pdev->dev.platform_data)) {
+       if ((id < num) && (!pdev->dev.platform_data)) {
                pinfo = &mpc885_uart_pdata[id];
                pinfo->uart_clk = bd->bi_intfreq;
                pdev->dev.platform_data = pinfo;
index 2b1e6c9a6e0e539826a3a8d45181ec5a7c72e76c..45c9fa7d7545465bd4c9bdc016b5c5cc925cf65e 100644 (file)
@@ -109,7 +109,7 @@ static LIST_HEAD(appldata_ops_list);
  *
  * schedule work and reschedule timer
  */
-static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
+static void appldata_timer_function(unsigned long data)
 {
        P_DEBUG("   -= Timer =-\n");
        P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
index c49ab8c784d27c4806885bc84e75380af10f91e5..4faf96f8a83414ddf7f36eb46aa8bb21b4e50b90 100644 (file)
@@ -117,8 +117,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
         int index;
        struct pt_regs *old_regs;
 
-       irq_enter();
        old_regs = set_irq_regs(regs);
+       irq_enter();
        asm volatile ("mc 0,0");
        if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
                /**
@@ -134,8 +134,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
                                p->handler(code);
                }
        }
-       set_irq_regs(old_regs);
        irq_exit();
+       set_irq_regs(old_regs);
 }
 
 EXPORT_SYMBOL(register_external_interrupt);
index 9f19e833a56253535af44a1481284fffe9682360..90b5ef529eb7e881acf62b7a1e45386c8e074ba8 100644 (file)
@@ -51,4 +51,3 @@ EXPORT_SYMBOL(csum_fold);
 EXPORT_SYMBOL(console_mode);
 EXPORT_SYMBOL(console_devno);
 EXPORT_SYMBOL(console_irq);
-EXPORT_SYMBOL(sys_wait4);
index d9428a0fc8fb9ee3011f90f35f74a3be97bee70e..0d14a4789bf2e1a781e8ac98a42c3ef1e6eed09a 100644 (file)
@@ -62,27 +62,26 @@ static inline unsigned long save_context_stack(struct stack_trace *trace,
 void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
 {
        register unsigned long sp asm ("15");
-       unsigned long orig_sp;
+       unsigned long orig_sp, new_sp;
 
-       sp &= PSW_ADDR_INSN;
-       orig_sp = sp;
+       orig_sp = sp & PSW_ADDR_INSN;
 
-       sp = save_context_stack(trace, &trace->skip, sp,
+       new_sp = save_context_stack(trace, &trace->skip, orig_sp,
                                S390_lowcore.panic_stack - PAGE_SIZE,
                                S390_lowcore.panic_stack);
-       if ((sp != orig_sp) && !trace->all_contexts)
+       if ((new_sp != orig_sp) && !trace->all_contexts)
                return;
-       sp = save_context_stack(trace, &trace->skip, sp,
+       new_sp = save_context_stack(trace, &trace->skip, new_sp,
                                S390_lowcore.async_stack - ASYNC_SIZE,
                                S390_lowcore.async_stack);
-       if ((sp != orig_sp) && !trace->all_contexts)
+       if ((new_sp != orig_sp) && !trace->all_contexts)
                return;
        if (task)
-               save_context_stack(trace, &trace->skip, sp,
+               save_context_stack(trace, &trace->skip, new_sp,
                                   (unsigned long) task_stack_page(task),
                                   (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
-               save_context_stack(trace, &trace->skip, sp,
+               save_context_stack(trace, &trace->skip, new_sp,
                                   S390_lowcore.thread_info,
                                   S390_lowcore.thread_info + THREAD_SIZE);
        return;
index 1d7d3938b2b1686a860c8a26c43dbc385c1b3917..21baaf5496d61b02525a933c8139524710863cfb 100644 (file)
@@ -209,11 +209,11 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
  * Do the callback functions of expired vtimer events.
  * Called from within the interrupt handler.
  */
-static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
+static void do_callbacks(struct list_head *cb_list)
 {
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
-       void (*fn)(unsigned long, struct pt_regs*);
+       void (*fn)(unsigned long);
        unsigned long data;
 
        if (list_empty(cb_list))
@@ -224,7 +224,7 @@ static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
        list_for_each_entry_safe(event, tmp, cb_list, entry) {
                fn = event->function;
                data = event->data;
-               fn(data, regs);
+               fn(data);
 
                if (!event->interval)
                        /* delete one shot timer */
@@ -275,7 +275,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
                list_move_tail(&event->entry, &cb_list);
        }
        spin_unlock(&vt_list->lock);
-       do_callbacks(&cb_list, get_irq_regs());
+       do_callbacks(&cb_list);
 
        /* next event is first in list */
        spin_lock(&vt_list->lock);
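
This drops the pt_regs argument from the vtimer callback type to match
the tree-wide handler change; a callback that still needs the interrupted
register state can fetch it with get_irq_regs().  A callback under the
new signature (a hypothetical example, not from the patch):

    struct my_state {
            atomic_t ticks;
    };

    static void example_vtimer_fn(unsigned long data)
    {
            struct my_state *s = (struct my_state *) data;

            atomic_inc(&s->ticks);      /* periodic work goes here */
    }
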
index f6a0c44361682de8e34fdc1eec8c57a2d7977f7e..6a461d4caeffc814e2f5968c178edc1de2569bcf 100644 (file)
@@ -45,6 +45,9 @@ config GENERIC_CALIBRATE_DELAY
 config GENERIC_IOMAP
        bool
 
+config GENERIC_TIME
+       def_bool n
+
 config ARCH_MAY_HAVE_PC_FDC
        bool
 
@@ -357,6 +360,7 @@ config CPU_HAS_SR_RB
 endmenu
 
 menu "Timer support"
+depends on !GENERIC_TIME
 
 config SH_TMU
        bool "TMU timer support"
index 75f91aaae0777ab373cd476da69bd9e6bf5620bf..219179114f0fe6fda9faf5b610c7e2a17781a5cf 100644 (file)
@@ -83,7 +83,7 @@ static int hp6x0_apm_get_info(char *buf, char **start, off_t fpos, int length)
        return p - buf;
 }
 
-static irqreturn_t hp6x0_apm_interrupt(int irq, void *dev, struct pt_regs *regs)
+static irqreturn_t hp6x0_apm_interrupt(int irq, void *dev)
 {
        if (!apm_suspended)
                apm_queue_event(APM_USER_SUSPEND);
@@ -96,7 +96,7 @@ static int __init hp6x0_apm_init(void)
        int ret;
 
        ret = request_irq(HP680_BTN_IRQ, hp6x0_apm_interrupt,
-                         SA_INTERRUPT, MODNAME, 0);
+                         IRQF_DISABLED, MODNAME, 0);
        if (unlikely(ret < 0)) {
                printk(KERN_ERR MODNAME ": IRQ %d request failed\n",
                       HP680_BTN_IRQ);
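
The two hunks above show the whole shape of this tree-wide conversion:
interrupt handlers lose their struct pt_regs * argument, and the old SA_*
flags become IRQF_*.  A self-contained sketch of new-style registration
(every name below is illustrative, not from the patch):

    #include <linux/interrupt.h>
    #include <asm/irq_regs.h>

    #define EXAMPLE_IRQ 42          /* made-up IRQ number */

    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
            /* handlers that still need the trap frame fetch it here */
            struct pt_regs *regs = get_irq_regs();

            (void) regs;
            return IRQ_HANDLED;
    }

    static int __init example_init(void)
    {
            return request_irq(EXAMPLE_IRQ, example_interrupt,
                               IRQF_DISABLED, "example", NULL);
    }
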
index 0b7bee1a9ca5af99c0c3bdeb33aa0a8370f39174..e62524978160ac3fbb79448ea7d0656764bd2ee1 100644 (file)
@@ -135,7 +135,7 @@ static int swdrv_write(struct file *filp, const char *buff, size_t count,
        return count;
 }
 
-static irqreturn_t sw_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t sw_interrupt(int irq, void *dev_id)
 {
        landisk_btn = (0x0ff & (~ctrl_inb(PA_STATUS)));
        disable_irq(IRQ_BUTTON);
index 01c10fa5c0589092677190289dd626120df7d20a..7c3d1d304157f48d70555fa4dc71bdc199aeb00d 100644 (file)
@@ -69,7 +69,6 @@ static void __init pci_write_config(unsigned long busNo,
 
 static unsigned char m_irq_mask = 0xfb;
 static unsigned char s_irq_mask = 0xff;
-volatile unsigned long irq_err_count;
 
 static void disable_mpc1211_irq(unsigned int irq)
 {
@@ -118,7 +117,7 @@ static void mask_and_ack_mpc1211(unsigned int irq)
        if(irq < 8) {
                if(m_irq_mask & (1<<irq)){
                  if(!mpc1211_irq_real(irq)){
-                   irq_err_count++;
+                   atomic_inc(&irq_err_count);
                    printk("spurious 8259A interrupt: IRQ %x\n",irq);
                   }
                } else {
@@ -131,7 +130,7 @@ static void mask_and_ack_mpc1211(unsigned int irq)
        } else {
                if(s_irq_mask & (1<<(irq - 8))){
                  if(!mpc1211_irq_real(irq)){
-                   irq_err_count++;
+                   atomic_inc(&irq_err_count);
                    printk("spurious 8259A interrupt: IRQ %x\n",irq);
                  }
                } else {
index 2d960e9a3143b5bae8c2061b0c0bd4e4dfd4915e..b544772cbc72c61ae31aa12c643d586f90d0a2ae 100644 (file)
@@ -1,18 +1,16 @@
 /*
- * linux/arch/sh/boards/renesas/r7780rp/irq.c
- *
- * Copyright (C) 2000  Kazumoto Kojima
- *
  * Renesas Solutions Highlander R7780RP-1 Support.
  *
- * Modified for R7780RP-1 by
- * Atom Create Engineering Co., Ltd. 2002.
+ * Copyright (C) 2002  Atom Create Engineering Co., Ltd.
+ * Copyright (C) 2006  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/r7780rp/r7780rp.h>
 
 #ifdef CONFIG_SH_R7780MP
 static int mask_pos[] = {12, 11, 9, 14, 15, 8, 13, 6, 5, 4, 3, 2, 0, 0, 1, 0};
@@ -20,71 +18,26 @@ static int mask_pos[] = {12, 11, 9, 14, 15, 8, 13, 6, 5, 4, 3, 2, 0, 0, 1, 0};
 static int mask_pos[] = {15, 14, 13, 12, 11, 10, 9, 8, 7, 5, 6, 4, 0, 1, 2, 0};
 #endif
 
-static void enable_r7780rp_irq(unsigned int irq);
-static void disable_r7780rp_irq(unsigned int irq);
-
-/* shutdown is same as "disable" */
-#define shutdown_r7780rp_irq disable_r7780rp_irq
-
-static void ack_r7780rp_irq(unsigned int irq);
-static void end_r7780rp_irq(unsigned int irq);
-
-static unsigned int startup_r7780rp_irq(unsigned int irq)
-{
-       enable_r7780rp_irq(irq);
-       return 0; /* never anything pending */
-}
-
-static void disable_r7780rp_irq(unsigned int irq)
-{
-       unsigned short val;
-       unsigned short mask = 0xffff ^ (0x0001 << mask_pos[irq]);
-
-       /* Set the priority in IPR to 0 */
-       val = ctrl_inw(IRLCNTR1);
-       val &= mask;
-       ctrl_outw(val, IRLCNTR1);
-}
-
 static void enable_r7780rp_irq(unsigned int irq)
 {
-       unsigned short val;
-       unsigned short value = (0x0001 << mask_pos[irq]);
-
        /* Set priority in IPR back to original value */
-       val = ctrl_inw(IRLCNTR1);
-       val |= value;
-       ctrl_outw(val, IRLCNTR1);
-}
-
-static void ack_r7780rp_irq(unsigned int irq)
-{
-       disable_r7780rp_irq(irq);
+       ctrl_outw(ctrl_inw(IRLCNTR1) | (1 << mask_pos[irq]), IRLCNTR1);
 }
 
-static void end_r7780rp_irq(unsigned int irq)
+static void disable_r7780rp_irq(unsigned int irq)
 {
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_r7780rp_irq(irq);
+       /* Set the priority in IPR to 0 */
+       ctrl_outw(ctrl_inw(IRLCNTR1) & (0xffff ^ (1 << mask_pos[irq])),
+                 IRLCNTR1);
 }
 
-static struct hw_interrupt_type r7780rp_irq_type = {
-       .typename = "R7780RP-IRQ",
-       .startup = startup_r7780rp_irq,
-       .shutdown = shutdown_r7780rp_irq,
-       .enable = enable_r7780rp_irq,
-       .disable = disable_r7780rp_irq,
-       .ack = ack_r7780rp_irq,
-       .end = end_r7780rp_irq,
+static struct irq_chip r7780rp_irq_chip __read_mostly = {
+       .name           = "r7780rp",
+       .mask           = disable_r7780rp_irq,
+       .unmask         = enable_r7780rp_irq,
+       .mask_ack       = disable_r7780rp_irq,
 };
 
-static void make_r7780rp_irq(unsigned int irq)
-{
-       disable_irq_nosync(irq);
-       irq_desc[irq].chip = &r7780rp_irq_type;
-       disable_r7780rp_irq(irq);
-}
-
 /*
  * Initialize IRQ setting
  */
@@ -92,24 +45,10 @@ void __init init_r7780rp_IRQ(void)
 {
        int i;
 
-       /* IRL0=PCI Slot #A
-        * IRL1=PCI Slot #B
-        * IRL2=PCI Slot #C
-        * IRL3=PCI Slot #D
-        * IRL4=CF Card
-        * IRL5=CF Card Insert
-        * IRL6=M66596
-        * IRL7=SD Card
-        * IRL8=Touch Panel
-        * IRL9=SCI
-        * IRL10=Serial
-        * IRL11=Extention #A
-        * IRL11=Extention #B
-        * IRL12=Debug LAN
-        * IRL13=Push Switch
-        * IRL14=ZiggBee IO
-        */
-
-       for (i=0; i<15; i++)
-               make_r7780rp_irq(i);
+       for (i = 0; i < 15; i++) {
+               disable_irq_nosync(i);
+               set_irq_chip_and_handler(i, &r7780rp_irq_chip,
+                                        handle_level_irq);
+               enable_r7780rp_irq(i);
+       }
 }
index f5e98c56b530a7c23d3e43b77e2b063cf19a448f..540d0bf16446e6edf6f3375e99661d5712ded7ce 100644 (file)
@@ -33,7 +33,7 @@ extern void pcibios_init(void);
  * EraseConfig handling functions
  */
 
-static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
 {
        volatile char dummy __attribute__((unused)) = * (volatile char *) 0xb8000000;
 
index 38f1e8171a3abbf361ac628e2b1f63c24c4b9f92..4d49b5cbcc1333632c685adb64cef2b5e79210f7 100644 (file)
@@ -71,7 +71,7 @@ static struct hw_interrupt_type hd64461_irq_type = {
        .end            = end_hd64461_irq,
 };
 
-static irqreturn_t hd64461_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t hd64461_interrupt(int irq, void *dev_id)
 {
        printk(KERN_INFO
               "HD64461: spurious interrupt, nirr: 0x%x nimr: 0x%x\n",
index 72320d02d69af015a7996997945ebc93918960d5..43431855ec86976874a0b4cf2c76305d10b42d5a 100644 (file)
@@ -85,7 +85,7 @@ static struct {
     void *dev;
 } handlers[GPIO_NPORTS * 8];
 
-static irqreturn_t hd64465_gpio_interrupt(int irq, void *dev, struct pt_regs *regs)
+static irqreturn_t hd64465_gpio_interrupt(int irq, void *dev)
 {
        unsigned short port, pin, isr, mask, portpin;
        
index 30573d3e1966717c46c420703972166625cb785f..d126e1f30dee60b99f0f959bd8755516c3618086 100644 (file)
@@ -84,7 +84,7 @@ static struct hw_interrupt_type hd64465_irq_type = {
 };
 
 
-static irqreturn_t hd64465_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t hd64465_interrupt(int irq, void *dev_id)
 {
        printk(KERN_INFO
               "HD64465: spurious interrupt, nirr: 0x%x nimr: 0x%x\n",
index 392c8b12ce36789dea7dbe0b8b3a25084f49371b..bf1b28feca06d7222ca5c5f53e272b753a0d6a3f 100644 (file)
@@ -88,8 +88,7 @@ static struct hw_interrupt_type voyagergx_irq_type = {
        .end = end_voyagergx_irq,
 };
 
-static irqreturn_t voyagergx_interrupt(int irq, void *dev_id,
-                                     struct pt_regs *regs)
+static irqreturn_t voyagergx_interrupt(int irq, void *dev_id)
 {
        printk(KERN_INFO
               "VoyagerGX: spurious interrupt, status: 0x%x\n",
index 9cb0709241808b6365e1a024f048722cbffb8a94..0caf11bb7e27993ae65d6e0ae5ff1531514c931a 100644 (file)
@@ -51,7 +51,7 @@ static volatile struct g2_dma_info *g2_dma = (volatile struct g2_dma_info *)0xa0
        ((g2_dma->channel[i].size - \
          g2_dma->status[i].size) & 0x0fffffff)
 
-static irqreturn_t g2_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t g2_dma_interrupt(int irq, void *dev_id)
 {
        int i;
 
index c1b6bc23c107f3cfc0d39941fd9cff9e2c2b9529..838fad566eaf68f8e4282fa4f218c02915e15147 100644 (file)
@@ -21,7 +21,7 @@
 static unsigned int xfer_complete;
 static int count;
 
-static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id)
 {
        if (get_dma_residue(PVR2_CASCADE_CHAN)) {
                printk(KERN_WARNING "DMA: SH DMAC did not complete transfer "
index cbbe8bce3d679fd7d5b77e4f40363fd314dc3dee..d8ece20bb2cf078fd913facab2fdc4def5d1f384 100644 (file)
@@ -60,9 +60,9 @@ static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
  * Besides that it needs to waken any waiting process, which should handle
  * setting up the next transfer.
  */
-static irqreturn_t dma_tei(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t dma_tei(int irq, void *dev_id)
 {
-       struct dma_channel *chan = (struct dma_channel *)dev_id;
+       struct dma_channel *chan = dev_id;
        u32 chcr;
 
        chcr = ctrl_inl(CHCR[chan->chan]);
@@ -228,7 +228,7 @@ static inline int dmaor_reset(void)
 }
 
 #if defined(CONFIG_CPU_SH4)
-static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t dma_err(int irq, void *dummy)
 {
        dmaor_reset();
        disable_irq(irq);
index dbe837884983285c302cc60d64e900fbd7ccc94c..85e1ee2e2e7b47d6997304768bfbc25c5998d92b 100644 (file)
@@ -155,7 +155,7 @@ int __init sh7751_pcic_init(struct sh4_pci_address_map *map)
         */
        pr_debug("PCI: Mapping IO address 0x%x - 0x%x to base 0x%x\n",
                 PCIBIOS_MIN_IO, (64 << 10),
-                SH4_PCI_IO_BASE + PCIBIOS_MIN_IO);
+                SH7751_PCI_IO_BASE + PCIBIOS_MIN_IO);
 
        /*
         * XXX: For now, leave this board-specific. In the event we have other
@@ -163,7 +163,7 @@ int __init sh7751_pcic_init(struct sh4_pci_address_map *map)
         */
 #ifdef CONFIG_SH_BIGSUR
        bigsur_port_map(PCIBIOS_MIN_IO, (64 << 10),
-                       SH4_PCI_IO_BASE + PCIBIOS_MIN_IO, 0);
+                       SH7751_PCI_IO_BASE + PCIBIOS_MIN_IO, 0);
 #endif
 
        /* Make sure the MSB's of IO window are set to access PCI space
index 4ab5ea6b35fb5eeaf22fa5dd5889803f4d9555c1..efecb3d5995c24e8dd7221420b0205da06ab3d4a 100644 (file)
@@ -161,7 +161,7 @@ static char * pci_commands[16]={
        "Memory Write-and-Invalidate"
 };
 
-static irqreturn_t st40_pci_irq(int irq, void *dev_instance, struct pt_regs *regs)
+static irqreturn_t st40_pci_irq(int irq, void *dev_instance)
 {
        unsigned pci_int, pci_air, pci_cir, pci_aint;
        static int count=0;
index e30e4b7aa70e739be7ed77211b32d5cc04f1801f..d4b2bb7e08c706772df81f9f79766c68a8bbefbf 100644 (file)
  * These are the "new Hitachi style" interrupts, as present on the
  * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780.
  */
-
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <asm/system.h>
 #include <asm/io.h>
-#include <asm/machvec.h>
-
-struct intc2_data {
-       unsigned char msk_offset;
-       unsigned char msk_shift;
-
-       int (*clear_irq) (int);
-};
-
-static struct intc2_data intc2_data[NR_INTC2_IRQS];
-
-static void enable_intc2_irq(unsigned int irq);
-static void disable_intc2_irq(unsigned int irq);
-
-/* shutdown is same as "disable" */
-#define shutdown_intc2_irq disable_intc2_irq
-
-static void mask_and_ack_intc2(unsigned int);
-static void end_intc2_irq(unsigned int irq);
-
-static unsigned int startup_intc2_irq(unsigned int irq)
-{
-       enable_intc2_irq(irq);
-       return 0; /* never anything pending */
-}
-
-static struct hw_interrupt_type intc2_irq_type = {
-       .typename       = "INTC2-IRQ",
-       .startup        = startup_intc2_irq,
-       .shutdown       = shutdown_intc2_irq,
-       .enable         = enable_intc2_irq,
-       .disable        = disable_intc2_irq,
-       .ack            = mask_and_ack_intc2,
-       .end            = end_intc2_irq
-};
 
 static void disable_intc2_irq(unsigned int irq)
 {
-       int irq_offset = irq - INTC2_FIRST_IRQ;
-       int msk_shift, msk_offset;
-
-       /* Sanity check */
-       if (unlikely(irq_offset < 0 || irq_offset >= NR_INTC2_IRQS))
-               return;
-
-       msk_shift = intc2_data[irq_offset].msk_shift;
-       msk_offset = intc2_data[irq_offset].msk_offset;
-
-       ctrl_outl(1 << msk_shift,
-                 INTC2_BASE + INTC2_INTMSK_OFFSET + msk_offset);
+       struct intc2_data *p = get_irq_chip_data(irq);
+       ctrl_outl(1 << p->msk_shift,
+                 INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset);
 }
 
 static void enable_intc2_irq(unsigned int irq)
 {
-       int irq_offset = irq - INTC2_FIRST_IRQ;
-       int msk_shift, msk_offset;
-
-       /* Sanity check */
-       if (unlikely(irq_offset < 0 || irq_offset >= NR_INTC2_IRQS))
-               return;
-
-       msk_shift = intc2_data[irq_offset].msk_shift;
-       msk_offset = intc2_data[irq_offset].msk_offset;
-
-       ctrl_outl(1 << msk_shift,
-                 INTC2_BASE + INTC2_INTMSKCLR_OFFSET + msk_offset);
-}
-
-static void mask_and_ack_intc2(unsigned int irq)
-{
-       disable_intc2_irq(irq);
+       struct intc2_data *p = get_irq_chip_data(irq);
+       ctrl_outl(1 << p->msk_shift,
+                 INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset);
 }
 
-static void end_intc2_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_intc2_irq(irq);
-
-       if (unlikely(intc2_data[irq - INTC2_FIRST_IRQ].clear_irq))
-               intc2_data[irq - INTC2_FIRST_IRQ].clear_irq(irq);
-}
+static struct irq_chip intc2_irq_chip = {
+       .typename       = "intc2",
+       .mask           = disable_intc2_irq,
+       .unmask         = enable_intc2_irq,
+       .mask_ack       = disable_intc2_irq,
+};
 
 /*
  * Setup an INTC2 style interrupt.
@@ -108,46 +47,30 @@ static void end_intc2_irq(unsigned int irq)
  *                         |     |             |  |
  *    make_intc2_irq(84,   0,   16,            0, 13);
  */
-void make_intc2_irq(unsigned int irq,
-                   unsigned int ipr_offset, unsigned int ipr_shift,
-                   unsigned int msk_offset, unsigned int msk_shift,
-                   unsigned int priority)
+void make_intc2_irq(struct intc2_data *p)
 {
-       int irq_offset = irq - INTC2_FIRST_IRQ;
        unsigned int flags;
        unsigned long ipr;
 
-       if (unlikely(irq_offset < 0 || irq_offset >= NR_INTC2_IRQS))
-               return;
-
-       disable_irq_nosync(irq);
-
-       /* Fill the data we need */
-       intc2_data[irq_offset].msk_offset = msk_offset;
-       intc2_data[irq_offset].msk_shift  = msk_shift;
-       intc2_data[irq_offset].clear_irq = NULL;
+       disable_irq_nosync(p->irq);
 
        /* Set the priority level */
        local_irq_save(flags);
 
-       ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET + ipr_offset);
-       ipr &= ~(0xf << ipr_shift);
-       ipr |= priority << ipr_shift;
-       ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET + ipr_offset);
+       ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET + p->ipr_offset);
+       ipr &= ~(0xf << p->ipr_shift);
+       ipr |= p->priority << p->ipr_shift;
+       ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET + p->ipr_offset);
 
        local_irq_restore(flags);
 
-       irq_desc[irq].chip = &intc2_irq_type;
+       set_irq_chip_and_handler(p->irq, &intc2_irq_chip, handle_level_irq);
+       set_irq_chip_data(p->irq, p);
 
-       disable_intc2_irq(irq);
+       enable_intc2_irq(p->irq);
 }
 
-static struct intc2_init {
-       unsigned short irq;
-       unsigned char ipr_offset, ipr_shift;
-       unsigned char msk_offset, msk_shift;
-       unsigned char priority;
-} intc2_init_data[]  __initdata = {
+static struct intc2_data intc2_irq_table[] = {
 #if defined(CONFIG_CPU_SUBTYPE_ST40)
        {64,  0,  0, 0,  0, 13},        /* PCI serr */
        {65,  0,  4, 0,  1, 13},        /* PCI err */
@@ -266,19 +189,6 @@ void __init init_IRQ_intc2(void)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(intc2_init_data); i++) {
-               struct intc2_init *p = intc2_init_data + i;
-               make_intc2_irq(p->irq, p->ipr_offset, p->ipr_shift,
-                              p-> msk_offset, p->msk_shift, p->priority);
-       }
-}
-
-/* Adds a termination callback to the interrupt */
-void intc2_add_clear_irq(int irq, int (*fn)(int))
-{
-       if (unlikely(irq < INTC2_FIRST_IRQ))
-               return;
-
-       intc2_data[irq - INTC2_FIRST_IRQ].clear_irq = fn;
+       for (i = 0; i < ARRAY_SIZE(intc2_irq_table); i++)
+               make_intc2_irq(intc2_irq_table + i);
 }
-
index f785822cd5dea4fb32e908a93f328c00ec3ffa4d..8944abdf6e1c27bd6177458159202dc2661f9bb3 100644 (file)
@@ -1,11 +1,10 @@
 /*
- * arch/sh/kernel/cpu/irq/ipr.c
+ * Interrupt handling for IPR-based IRQ.
  *
  * Copyright (C) 1999  Niibe Yutaka & Takeshi Yaegashi
  * Copyright (C) 2000  Kazumoto Kojima
- * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
- *
- * Interrupt handling for IPR-based IRQ.
+ * Copyright (C) 2003  Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
+ * Copyright (C) 2006  Paul Mundt
  *
  * Supported system:
  *     On-chip supporting modules (TMU, RTC, etc.).
  *     Hitachi SolutionEngine external I/O:
  *             MS7709SE01, MS7709ASE01, and MS7750SE01
  *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
  */
-
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/module.h>
-
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/machvec.h>
@@ -28,93 +28,45 @@ struct ipr_data {
        int shift;              /* Shifts of the 16-bit data */
        int priority;           /* The priority */
 };
-static struct ipr_data ipr_data[NR_IRQS];
-
-static void enable_ipr_irq(unsigned int irq);
-static void disable_ipr_irq(unsigned int irq);
-
-/* shutdown is same as "disable" */
-#define shutdown_ipr_irq disable_ipr_irq
-
-static void mask_and_ack_ipr(unsigned int);
-static void end_ipr_irq(unsigned int irq);
-
-static unsigned int startup_ipr_irq(unsigned int irq)
-{
-       enable_ipr_irq(irq);
-       return 0; /* never anything pending */
-}
-
-static struct hw_interrupt_type ipr_irq_type = {
-       .typename = "IPR-IRQ",
-       .startup = startup_ipr_irq,
-       .shutdown = shutdown_ipr_irq,
-       .enable = enable_ipr_irq,
-       .disable = disable_ipr_irq,
-       .ack = mask_and_ack_ipr,
-       .end = end_ipr_irq
-};
 
 static void disable_ipr_irq(unsigned int irq)
 {
-       unsigned long val;
-       unsigned int addr = ipr_data[irq].addr;
-       unsigned short mask = 0xffff ^ (0x0f << ipr_data[irq].shift);
-
+       struct ipr_data *p = get_irq_chip_data(irq);
        /* Set the priority in IPR to 0 */
-       val = ctrl_inw(addr);
-       val &= mask;
-       ctrl_outw(val, addr);
+       ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr);
 }
 
 static void enable_ipr_irq(unsigned int irq)
 {
-       unsigned long val;
-       unsigned int addr = ipr_data[irq].addr;
-       int priority = ipr_data[irq].priority;
-       unsigned short value = (priority << ipr_data[irq].shift);
-
+       struct ipr_data *p = get_irq_chip_data(irq);
        /* Set priority in IPR back to original value */
-       val = ctrl_inw(addr);
-       val |= value;
-       ctrl_outw(val, addr);
+       ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr);
 }
 
-static void mask_and_ack_ipr(unsigned int irq)
-{
-       disable_ipr_irq(irq);
-
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
-       /* This is needed when we use edge triggered setting */
-       /* XXX: Is it really needed? */
-       if (IRQ0_IRQ <= irq && irq <= IRQ5_IRQ) {
-               /* Clear external interrupt request */
-               int a = ctrl_inb(INTC_IRR0);
-               a &= ~(1 << (irq - IRQ0_IRQ));
-               ctrl_outb(a, INTC_IRR0);
-       }
-#endif
-}
-
-static void end_ipr_irq(unsigned int irq)
-{
-       if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-               enable_ipr_irq(irq);
-}
+static struct irq_chip ipr_irq_chip = {
+       .name           = "ipr",
+       .mask           = disable_ipr_irq,
+       .unmask         = enable_ipr_irq,
+       .mask_ack       = disable_ipr_irq,
+};
 
 void make_ipr_irq(unsigned int irq, unsigned int addr, int pos, int priority)
 {
+       struct ipr_data ipr_data;
+
        disable_irq_nosync(irq);
-       ipr_data[irq].addr = addr;
-       ipr_data[irq].shift = pos*4; /* POSition (0-3) x 4 means shift */
-       ipr_data[irq].priority = priority;
 
-       irq_desc[irq].chip = &ipr_irq_type;
-       disable_ipr_irq(irq);
+       ipr_data.addr = addr;
+       ipr_data.shift = pos*4; /* POSition (0-3) x 4 means shift */
+       ipr_data.priority = priority;
+
+       set_irq_chip_and_handler(irq, &ipr_irq_chip, handle_level_irq);
+       set_irq_chip_data(irq, &ipr_data);
+
+       enable_ipr_irq(irq);
 }
 
+/* XXX: This needs to die a horrible death.. */
 void __init init_IRQ(void)
 {
 #ifndef CONFIG_CPU_SUBTYPE_SH7780
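
One hazard worth flagging in the rewritten make_ipr_irq() above: it hands
set_irq_chip_data() the address of ipr_data, a stack local, yet
enable_ipr_irq() and disable_ipr_irq() dereference that pointer on every
mask and unmask, long after make_ipr_irq() has returned.  A sketch of one
way to give the per-IRQ data static lifetime (table sizing and naming are
illustrative):

    static struct ipr_data ipr_data_table[NR_IRQS];     /* outlives setup */

    void make_ipr_irq(unsigned int irq, unsigned int addr, int pos,
                      int priority)
    {
            struct ipr_data *p = &ipr_data_table[irq];

            disable_irq_nosync(irq);

            p->addr = addr;
            p->shift = pos * 4;     /* POSition (0-3) x 4 means shift */
            p->priority = priority;

            set_irq_chip_and_handler(irq, &ipr_irq_chip, handle_level_irq);
            set_irq_chip_data(irq, p);

            enable_ipr_irq(irq);
    }
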
index 44daf44833f90852e0f39557784fab51e2fc8ae2..6be46f0686b77c1c8110185afd4b91d8fe67178a 100644 (file)
@@ -49,198 +49,3 @@ ENTRY(nmi_slot)
 #endif
 ENTRY(user_break_point_trap)
        .long   break_point_trap        /* 1E0 */
-ENTRY(interrupt_table)
-       ! external hardware
-       .long   do_IRQ  ! 0000          /* 200 */
-       .long   do_IRQ  ! 0001
-       .long   do_IRQ  ! 0010
-       .long   do_IRQ  ! 0011
-       .long   do_IRQ  ! 0100
-       .long   do_IRQ  ! 0101
-       .long   do_IRQ  ! 0110
-       .long   do_IRQ  ! 0111
-       .long   do_IRQ  ! 1000          /* 300 */
-       .long   do_IRQ  ! 1001
-       .long   do_IRQ  ! 1010
-       .long   do_IRQ  ! 1011
-       .long   do_IRQ  ! 1100
-       .long   do_IRQ  ! 1101
-       .long   do_IRQ  ! 1110
-       .long   exception_error         
-       ! Internal hardware
-       .long   do_IRQ  ! TMU0 tuni0    /* 400 */
-       .long   do_IRQ  ! TMU1 tuni1
-       .long   do_IRQ  ! TMU2 tuni2
-       .long   do_IRQ  !      ticpi2
-       .long   do_IRQ  ! RTC  ati
-       .long   do_IRQ  !      pri
-       .long   do_IRQ  !      cui
-       .long   do_IRQ  ! SCI  eri
-       .long   do_IRQ  !      rxi      /* 500 */
-       .long   do_IRQ  !      txi
-       .long   do_IRQ  !      tei
-       .long   do_IRQ  ! WDT  iti      /* 560 */
-       .long   do_IRQ  ! REF  rcmi
-       .long   do_IRQ  !      rovi
-       .long   do_IRQ                  
-       .long   do_IRQ                  /* 5E0 */
-#if  defined(CONFIG_CPU_SUBTYPE_SH7707) || \
-     defined(CONFIG_CPU_SUBTYPE_SH7709) || \
-     defined(CONFIG_CPU_SUBTYPE_SH7706) || \
-     defined(CONFIG_CPU_SUBTYPE_SH7300) || \
-     defined(CONFIG_CPU_SUBTYPE_SH7705) || \
-     defined(CONFIG_CPU_SUBTYPE_SH7710)
-       .long   do_IRQ  ! 32 IRQ  irq0  /* 600 */
-       .long   do_IRQ  ! 33      irq1
-       .long   do_IRQ  ! 34      irq2
-       .long   do_IRQ  ! 35      irq3
-       .long   do_IRQ  ! 36      irq4
-       .long   do_IRQ  ! 37      irq5
-       .long   do_IRQ  ! 38
-       .long   do_IRQ  ! 39
-       .long   do_IRQ  ! 40 PINT pint0-7       /* 700 */
-       .long   do_IRQ  ! 41      pint8-15
-       .long   do_IRQ  ! 42
-       .long   do_IRQ  ! 43
-       .long   do_IRQ  ! 44
-       .long   do_IRQ  ! 45    
-       .long   do_IRQ  ! 46
-       .long   do_IRQ  ! 47
-       .long   do_IRQ  ! 48 DMAC dei0  /* 800 */
-       .long   do_IRQ  ! 49      dei1
-       .long   do_IRQ  ! 50      dei2
-       .long   do_IRQ  ! 51      dei3
-       .long   do_IRQ  ! 52 IrDA eri1
-       .long   do_IRQ  ! 53      rxi1
-       .long   do_IRQ  ! 54      bri1
-       .long   do_IRQ  ! 55      txi1
-       .long   do_IRQ  ! 56 SCIF eri2
-       .long   do_IRQ  ! 57      rxi2
-       .long   do_IRQ  ! 58      bri2
-       .long   do_IRQ  ! 59      txi2
-       .long   do_IRQ  ! 60 ADC  adi   /* 980 */
-#if defined(CONFIG_CPU_SUBTYPE_SH7705)
-       .long   exception_none  ! 61    /* 9A0 */
-       .long   exception_none  ! 62
-       .long   exception_none  ! 63
-       .long   exception_none  ! 64    /* A00 */
-       .long   do_IRQ  ! 65 USB  usi0
-       .long   do_IRQ  ! 66      usi1
-       .long   exception_none  ! 67
-       .long   exception_none  ! 68
-       .long   exception_none  ! 69
-       .long   exception_none  ! 70
-       .long   exception_none  ! 71
-       .long   exception_none  ! 72    /* B00 */
-       .long   exception_none  ! 73
-       .long   exception_none  ! 74
-       .long   exception_none  ! 75
-       .long   exception_none  ! 76
-       .long   exception_none  ! 77
-       .long   exception_none  ! 78
-       .long   exception_none  ! 79
-       .long   do_IRQ  ! 80 TPU0 tpi0  /* C00 */
-       .long   do_IRQ  ! 81 TPU1 tpi1
-       .long   exception_none  ! 82
-       .long   exception_none  ! 83
-       .long   do_IRQ  ! 84 TPU2 tpi2
-       .long   do_IRQ  ! 85 TPU3 tpi3  /* CA0 */
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7300)
-       .long   do_IRQ  ! 61 LCDC lcdi  /* 9A0 */
-       .long   do_IRQ  ! 62 PCC  pcc0i
-       .long   do_IRQ  ! 63      pcc1i /* 9E0 */
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7710)
-       .long   exception_none  ! 61    /* 9A0 */
-       .long   exception_none  ! 62
-       .long   exception_none  ! 63
-       .long   exception_none  ! 64    /* A00 */
-       .long   exception_none  ! 65
-       .long   exception_none  ! 66
-       .long   exception_none  ! 67
-       .long   exception_none  ! 68
-       .long   exception_none  ! 69
-       .long   exception_none  ! 70
-       .long   exception_none  ! 71
-       .long   exception_none  ! 72    /* B00 */
-       .long   exception_none  ! 73
-       .long   exception_none  ! 74
-       .long   exception_none  ! 75
-       .long   do_IRQ  ! 76 DMAC2 dei4 /* B80 */
-       .long   do_IRQ  ! 77 DMAC2 dei5
-       .long   exception_none  ! 78
-       .long   do_IRQ  ! 79 IPSEC ipseci /* BE0 */
-       .long   do_IRQ  ! 80 EDMAC eint0 /* C00 */
-       .long   do_IRQ  ! 81 EDMAC eint1
-       .long   do_IRQ  ! 82 EDMAC eint2
-       .long   exception_none  ! 83    /* C60 */
-       .long   exception_none  ! 84
-       .long   exception_none  ! 85
-       .long   exception_none  ! 86
-       .long   exception_none  ! 87
-       .long   exception_none  ! 88    /* D00 */
-       .long   exception_none  ! 89
-       .long   exception_none  ! 90
-       .long   exception_none  ! 91
-       .long   exception_none  ! 92
-       .long   exception_none  ! 93
-       .long   exception_none  ! 94
-       .long   exception_none  ! 95
-       .long   do_IRQ  ! 96 SIOF eri0  /* E00 */
-       .long   do_IRQ  ! 97      txi0
-       .long   do_IRQ  ! 98      rxi0
-       .long   do_IRQ  ! 99      cci0
-       .long   do_IRQ  ! 100     eri1  /* E80 */
-       .long   do_IRQ  ! 101     txi1
-       .long   do_IRQ  ! 102     rxi2
-       .long   do_IRQ  ! 103     cci3
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7300)
-       .long   do_IRQ  ! 64
-       .long   do_IRQ  ! 65
-       .long   do_IRQ  ! 66
-       .long   do_IRQ  ! 67
-       .long   do_IRQ  ! 68
-       .long   do_IRQ  ! 69
-       .long   do_IRQ  ! 70
-       .long   do_IRQ  ! 71
-       .long   do_IRQ  ! 72
-       .long   do_IRQ  ! 73
-       .long   do_IRQ  ! 74
-       .long   do_IRQ  ! 75
-       .long   do_IRQ  ! 76
-       .long   do_IRQ  ! 77
-       .long   do_IRQ  ! 78
-       .long   do_IRQ  ! 79
-       .long   do_IRQ  ! 80 SCIF0(SH7300)
-       .long   do_IRQ  ! 81
-       .long   do_IRQ  ! 82
-       .long   do_IRQ  ! 83
-       .long   do_IRQ  ! 84
-       .long   do_IRQ  ! 85
-       .long   do_IRQ  ! 86
-       .long   do_IRQ  ! 87
-       .long   do_IRQ  ! 88
-       .long   do_IRQ  ! 89
-       .long   do_IRQ  ! 90
-       .long   do_IRQ  ! 91
-       .long   do_IRQ  ! 92
-       .long   do_IRQ  ! 93
-       .long   do_IRQ  ! 94
-       .long   do_IRQ  ! 95
-       .long   do_IRQ  ! 96
-       .long   do_IRQ  ! 97
-       .long   do_IRQ  ! 98
-       .long   do_IRQ  ! 99
-       .long   do_IRQ  ! 100
-       .long   do_IRQ  ! 101
-       .long   do_IRQ  ! 102
-       .long   do_IRQ  ! 103
-       .long   do_IRQ  ! 104
-       .long   do_IRQ  ! 105
-       .long   do_IRQ  ! 106
-       .long   do_IRQ  ! 107
-       .long   do_IRQ  ! 108
-#endif
-#endif
index 7146893a6cca5c3ebf33b78565eb2144c2f2f188..3f4cd043e900ef5ae6cde1094d506598246d98a3 100644 (file)
@@ -53,503 +53,3 @@ ENTRY(nmi_slot)
 #endif
 ENTRY(user_break_point_trap)
        .long   break_point_trap        /* 1E0 */
-ENTRY(interrupt_table)
-       ! external hardware
-       .long   do_IRQ  ! 0000          /* 200 */
-       .long   do_IRQ  ! 0001
-       .long   do_IRQ  ! 0010
-       .long   do_IRQ  ! 0011
-       .long   do_IRQ  ! 0100
-       .long   do_IRQ  ! 0101
-       .long   do_IRQ  ! 0110
-       .long   do_IRQ  ! 0111
-       .long   do_IRQ  ! 1000          /* 300 */
-       .long   do_IRQ  ! 1001
-       .long   do_IRQ  ! 1010
-       .long   do_IRQ  ! 1011
-       .long   do_IRQ  ! 1100
-       .long   do_IRQ  ! 1101
-       .long   do_IRQ  ! 1110
-       .long   exception_error         
-       ! Internal hardware
-#ifndef CONFIG_CPU_SUBTYPE_SH7780
-       .long   do_IRQ  ! TMU0 tuni0    /* 400 */
-       .long   do_IRQ  ! TMU1 tuni1
-       .long   do_IRQ  ! TMU2 tuni2
-       .long   do_IRQ  !      ticpi2
-#if  defined(CONFIG_CPU_SUBTYPE_SH7760)
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error                 /* 500 */
-       .long   exception_error
-       .long   exception_error
-#else
-       .long   do_IRQ  ! RTC  ati
-       .long   do_IRQ  !      pri
-       .long   do_IRQ  !      cui
-       .long   do_IRQ  ! SCI  eri
-       .long   do_IRQ  !      rxi      /* 500 */
-       .long   do_IRQ  !      txi
-       .long   do_IRQ  !      tei
-#endif
-       .long   do_IRQ  ! WDT  iti      /* 560 */
-       .long   do_IRQ  ! REF  rcmi
-       .long   do_IRQ  !      rovi
-       .long   do_IRQ                  
-       .long   do_IRQ                  /* 5E0 */
-       .long   do_IRQ  ! 32 Hitachi UDI        /* 600 */
-       .long   do_IRQ  ! 33 GPIO
-       .long   do_IRQ  ! 34 DMAC dmte0
-       .long   do_IRQ  ! 35      dmte1
-       .long   do_IRQ  ! 36      dmte2
-       .long   do_IRQ  ! 37      dmte3
-       .long   do_IRQ  ! 38      dmae
-       .long   exception_error                 ! 39    /* 6E0 */
-#if defined(CONFIG_CPU_SUBTYPE_SH7760)
-       .long   exception_error                         /* 700 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error                         /* 760 */
-#else
-       .long   do_IRQ  ! 40 SCIF eri           /* 700 */
-       .long   do_IRQ  ! 41      rxi
-       .long   do_IRQ  ! 42      bri
-       .long   do_IRQ  ! 43      txi
-#endif
-#if CONFIG_NR_ONCHIP_DMA_CHANNELS == 8
-       .long   do_IRQ  ! 44 DMAC dmte4         /* 780 */
-       .long   do_IRQ  ! 45      dmte5
-       .long   do_IRQ  ! 46      dmte6
-       .long   do_IRQ  ! 47      dmte7         /* 7E0 */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
-       .long   do_IRQ  ! 44 IIC1 ali           /* 780 */
-       .long   do_IRQ  ! 45      tacki
-       .long   do_IRQ  ! 46      waiti
-       .long   do_IRQ  ! 47      dtei          /* 7E0 */
-       .long   do_IRQ  ! 48 DMAC dei0          /* 800 */
-       .long   do_IRQ  ! 49      dei1          /* 820 */
-#else
-       .long   exception_error                 ! 44    /* 780 */
-       .long   exception_error                 ! 45
-       .long   exception_error                 ! 46
-       .long   exception_error                 ! 47
-#endif
-#if defined(CONFIG_SH_FPU)
-       .long   do_fpu_state_restore    ! 48    /* 800 */
-       .long   do_fpu_state_restore    ! 49    /* 820 */
-#elif !defined(CONFIG_CPU_SUBTYPE_SH7343) && \
-      !defined(CONFIG_CPU_SUBTYPE_SH73180)
-       .long   exception_error
-       .long   exception_error
-#endif
-#if defined(CONFIG_CPU_SUBTYPE_SH7751)
-       .long   exception_error                 /* 840 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error                 /* 900 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! PCI serr      /* A00 */
-       .long   do_IRQ  !     dma3
-       .long   do_IRQ  !     dma2
-       .long   do_IRQ  !     dma1
-       .long   do_IRQ  !     dma0
-       .long   do_IRQ  !     pwon
-       .long   do_IRQ  !     pwdwn
-       .long   do_IRQ  !     err
-       .long   do_IRQ  ! TMU3 tuni3    /* B00 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! TMU4 tuni4    /* B80 */
-#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
-       .long   do_IRQ  ! IRQ   irq6    /* 840 */
-       .long   do_IRQ  !       irq7
-       .long   do_IRQ  ! SCIF  eri0
-       .long   do_IRQ  !       rxi0
-       .long   do_IRQ  !       bri0
-       .long   do_IRQ  !       txi0
-       .long   do_IRQ  ! HCAN2 cani0   /* 900 */
-       .long   do_IRQ  !       cani1
-       .long   do_IRQ  ! SSI   ssii0
-       .long   do_IRQ  !       ssii1
-       .long   do_IRQ  ! HAC   haci0
-       .long   do_IRQ  !       haci1
-       .long   do_IRQ  ! IIC   iici0
-       .long   do_IRQ  !       iici1
-       .long   do_IRQ  ! USB   usbi    /* A00 */
-       .long   do_IRQ  ! LCDC  vint
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! DMABRG dmabrgi0
-       .long   do_IRQ  !        dmabrgi1
-       .long   do_IRQ  !        dmabrgi2
-       .long   exception_error
-       .long   do_IRQ  ! SCIF  eri1    /* B00 */
-       .long   do_IRQ  !       rxi1
-       .long   do_IRQ  !       bri1
-       .long   do_IRQ  !       txi1
-       .long   do_IRQ  !       eri2
-       .long   do_IRQ  !       rxi2
-       .long   do_IRQ  !       bri2
-       .long   do_IRQ  !       txi2
-       .long   do_IRQ  ! SIM   simeri  /* C00 */
-       .long   do_IRQ  !       simrxi
-       .long   do_IRQ  !       simtxi
-       .long   do_IRQ  !       simtei
-       .long   do_IRQ  ! HSPI  spii
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! MMCIF mmci0   /* D00 */
-       .long   do_IRQ  !       mmci1
-       .long   do_IRQ  !       mmci2
-       .long   do_IRQ  !       mmci3
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error                 /* E00 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! MFI   mfii
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error                 /* F00 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! ADC   adi
-       .long   do_IRQ  ! CMT   cmti    /* FA0 */
-#elif defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7343)
-       .long   do_IRQ  !  50 0x840
-       .long   do_IRQ  !  51 0x860
-       .long   do_IRQ  !  52 0x880
-       .long   do_IRQ  !  53 0x8a0
-       .long   do_IRQ  !  54 0x8c0
-       .long   do_IRQ  !  55 0x8e0
-       .long   do_IRQ  !  56 0x900
-       .long   do_IRQ  !  57 0x920
-       .long   do_IRQ  !  58 0x940
-       .long   do_IRQ  !  59 0x960
-       .long   do_IRQ  !  60 0x980
-       .long   do_IRQ  !  61 0x9a0
-       .long   do_IRQ  !  62 0x9c0
-       .long   do_IRQ  !  63 0x9e0
-       .long   do_IRQ  !  64 0xa00
-       .long   do_IRQ  !  65 0xa20
-       .long   do_IRQ  !  66 0xa40
-       .long   do_IRQ  !  67 0xa60
-       .long   do_IRQ  !  68 0xa80
-       .long   do_IRQ  !  69 0xaa0
-       .long   do_IRQ  !  70 0xac0
-       .long   do_IRQ  !  71 0xae0
-       .long   do_IRQ  !  72 0xb00
-       .long   do_IRQ  !  73 0xb20
-       .long   do_IRQ  !  74 0xb40
-       .long   do_IRQ  !  75 0xb60
-       .long   do_IRQ  !  76 0xb80
-       .long   do_IRQ  !  77 0xba0
-       .long   do_IRQ  !  78 0xbc0
-       .long   do_IRQ  !  79 0xbe0
-       .long   do_IRQ  !  80 0xc00
-       .long   do_IRQ  !  81 0xc20
-       .long   do_IRQ  !  82 0xc40
-       .long   do_IRQ  !  83 0xc60
-       .long   do_IRQ  !  84 0xc80
-       .long   do_IRQ  !  85 0xca0
-       .long   do_IRQ  !  86 0xcc0
-       .long   do_IRQ  !  87 0xce0
-       .long   do_IRQ  !  88 0xd00
-       .long   do_IRQ  !  89 0xd20
-       .long   do_IRQ  !  90 0xd40
-       .long   do_IRQ  !  91 0xd60
-       .long   do_IRQ  !  92 0xd80
-       .long   do_IRQ  !  93 0xda0
-       .long   do_IRQ  !  94 0xdc0
-       .long   do_IRQ  !  95 0xde0
-       .long   do_IRQ  !  96 0xe00
-       .long   do_IRQ  !  97 0xe20
-       .long   do_IRQ  !  98 0xe40
-       .long   do_IRQ  !  99 0xe60
-       .long   do_IRQ  ! 100 0xe80
-       .long   do_IRQ  ! 101 0xea0
-       .long   do_IRQ  ! 102 0xec0
-       .long   do_IRQ  ! 103 0xee0
-       .long   do_IRQ  ! 104 0xf00
-       .long   do_IRQ  ! 105 0xf20
-       .long   do_IRQ  ! 106 0xf40
-       .long   do_IRQ  ! 107 0xf60
-       .long   do_IRQ  ! 108 0xf80
-#elif defined(CONFIG_CPU_SUBTYPE_ST40STB1)
-       .long   exception_error                 !  50 0x840
-       .long   exception_error                 !  51 0x860
-       .long   exception_error                 !  52 0x880
-       .long   exception_error                 !  53 0x8a0
-       .long   exception_error                 !  54 0x8c0
-       .long   exception_error                 !  55 0x8e0
-       .long   exception_error                 !  56 0x900
-       .long   exception_error                 !  57 0x920
-       .long   exception_error                 !  58 0x940
-       .long   exception_error                 !  59 0x960
-       .long   exception_error                 !  60 0x980
-       .long   exception_error                 !  61 0x9a0
-       .long   exception_error                 !  62 0x9c0
-       .long   exception_error                 !  63 0x9e0
-       .long   do_IRQ  !  64 0xa00 PCI serr
-       .long   do_IRQ  !  65 0xa20     err
-       .long   do_IRQ  !  66 0xa40     ad
-       .long   do_IRQ  !  67 0xa60     pwr_dwn
-       .long   exception_error                 !  68 0xa80
-       .long   exception_error                 !  69 0xaa0
-       .long   exception_error                 !  70 0xac0
-       .long   exception_error                 !  71 0xae0
-       .long   do_IRQ  !  72 0xb00 DMA INT0
-       .long   do_IRQ  !  73 0xb20     INT1
-       .long   do_IRQ  !  74 0xb40     INT2
-       .long   do_IRQ  !  75 0xb60     INT3
-       .long   do_IRQ  !  76 0xb80     INT4
-       .long   exception_error                 !  77 0xba0
-       .long   do_IRQ  !  78 0xbc0 DMA ERR
-       .long   exception_error                 !  79 0xbe0
-       .long   do_IRQ  !  80 0xc00 PIO0
-       .long   do_IRQ  !  81 0xc20 PIO1
-       .long   do_IRQ  !  82 0xc40 PIO2
-       .long   exception_error                 !  83 0xc60
-       .long   exception_error                 !  84 0xc80
-       .long   exception_error                 !  85 0xca0
-       .long   exception_error                 !  86 0xcc0
-       .long   exception_error                 !  87 0xce0
-       .long   exception_error                 !  88 0xd00
-       .long   exception_error                 !  89 0xd20
-       .long   exception_error                 !  90 0xd40
-       .long   exception_error                 !  91 0xd60
-       .long   exception_error                 !  92 0xd80
-       .long   exception_error                 !  93 0xda0
-       .long   exception_error                 !  94 0xdc0
-       .long   exception_error                 !  95 0xde0
-       .long   exception_error                 !  96 0xe00
-       .long   exception_error                 !  97 0xe20
-       .long   exception_error                 !  98 0xe40
-       .long   exception_error                 !  99 0xe60
-       .long   exception_error                 ! 100 0xe80
-       .long   exception_error                 ! 101 0xea0
-       .long   exception_error                 ! 102 0xec0
-       .long   exception_error                 ! 103 0xee0
-       .long   exception_error                 ! 104 0xf00
-       .long   exception_error                 ! 105 0xf20
-       .long   exception_error                 ! 106 0xf40
-       .long   exception_error                 ! 107 0xf60
-       .long   exception_error                 ! 108 0xf80
-       .long   exception_error                 ! 109 0xfa0
-       .long   exception_error                 ! 110 0xfc0
-       .long   exception_error                 ! 111 0xfe0
-       .long   do_IRQ  ! 112 0x1000 Mailbox
-       .long   exception_error                 ! 113 0x1020
-       .long   exception_error                 ! 114 0x1040
-       .long   exception_error                 ! 115 0x1060
-       .long   exception_error                 ! 116 0x1080
-       .long   exception_error                 ! 117 0x10a0
-       .long   exception_error                 ! 118 0x10c0
-       .long   exception_error                 ! 119 0x10e0
-       .long   exception_error                 ! 120 0x1100
-       .long   exception_error                 ! 121 0x1120
-       .long   exception_error                 ! 122 0x1140
-       .long   exception_error                 ! 123 0x1160
-       .long   exception_error                 ! 124 0x1180
-       .long   exception_error                 ! 125 0x11a0
-       .long   exception_error                 ! 126 0x11c0
-       .long   exception_error                 ! 127 0x11e0
-       .long   exception_error                 ! 128 0x1200
-       .long   exception_error                 ! 129 0x1220
-       .long   exception_error                 ! 130 0x1240
-       .long   exception_error                 ! 131 0x1260
-       .long   exception_error                 ! 132 0x1280
-       .long   exception_error                 ! 133 0x12a0
-       .long   exception_error                 ! 134 0x12c0
-       .long   exception_error                 ! 135 0x12e0
-       .long   exception_error                 ! 136 0x1300
-       .long   exception_error                 ! 137 0x1320
-       .long   exception_error                 ! 138 0x1340
-       .long   exception_error                 ! 139 0x1360
-       .long   do_IRQ  ! 140 0x1380 EMPI INV_ADDR
-       .long   exception_error                 ! 141 0x13a0
-       .long   exception_error                 ! 142 0x13c0
-       .long   exception_error                 ! 143 0x13e0
-#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
-       .long   do_IRQ  !  50 0x840
-       .long   do_IRQ  !  51 0x860
-       .long   do_IRQ  !  52 0x880
-       .long   do_IRQ  !  53 0x8a0
-       .long   do_IRQ  !  54 0x8c0
-       .long   do_IRQ  !  55 0x8e0
-       .long   do_IRQ  !  56 0x900
-       .long   do_IRQ  !  57 0x920
-       .long   do_IRQ  !  58 0x940
-       .long   do_IRQ  !  59 0x960
-       .long   do_IRQ  !  60 0x980
-       .long   do_IRQ  !  61 0x9a0
-       .long   do_IRQ  !  62 0x9c0
-       .long   do_IRQ  !  63 0x9e0
-       .long   do_IRQ  !  64 0xa00
-       .long   do_IRQ  !  65 0xa20
-       .long   do_IRQ  !  66 0xa4d
-       .long   do_IRQ  !  67 0xa60
-       .long   do_IRQ  !  68 0xa80
-       .long   do_IRQ  !  69 0xaa0
-       .long   do_IRQ  !  70 0xac0
-       .long   do_IRQ  !  71 0xae0
-       .long   do_IRQ  !  72 0xb00
-       .long   do_IRQ  !  73 0xb20
-       .long   do_IRQ  !  74 0xb40
-       .long   do_IRQ  !  75 0xb60
-       .long   do_IRQ  !  76 0xb80
-       .long   do_IRQ  !  77 0xba0
-       .long   do_IRQ  !  78 0xbc0
-       .long   do_IRQ  !  79 0xbe0
-       .long   do_IRQ  !  80 0xc00
-       .long   do_IRQ  !  81 0xc20
-       .long   do_IRQ  !  82 0xc40
-       .long   do_IRQ  !  83 0xc60
-       .long   do_IRQ  !  84 0xc80
-       .long   do_IRQ  !  85 0xca0
-       .long   do_IRQ  !  86 0xcc0
-       .long   do_IRQ  !  87 0xce0
-       .long   do_IRQ  !  88 0xd00
-       .long   do_IRQ  !  89 0xd20
-       .long   do_IRQ  !  90 0xd40
-       .long   do_IRQ  !  91 0xd60
-       .long   do_IRQ  !  92 0xd80
-       .long   do_IRQ  !  93 0xda0
-       .long   do_IRQ  !  94 0xdc0
-       .long   do_IRQ  !  95 0xde0
-       .long   do_IRQ  !  96 0xe00
-       .long   do_IRQ  !  97 0xe20
-       .long   do_IRQ  !  98 0xe40
-       .long   do_IRQ  !  99 0xe60
-       .long   do_IRQ  ! 100 0xe80
-       .long   do_IRQ  ! 101 0xea0
-       .long   do_IRQ  ! 102 0xec0
-       .long   do_IRQ  ! 103 0xee0
-       .long   do_IRQ  ! 104 0xf00
-       .long   do_IRQ  ! 105 0xf20
-       .long   do_IRQ  ! 106 0xf40
-       .long   do_IRQ  ! 107 0xf60
-       .long   do_IRQ  ! 108 0xf80
-#endif
-#else
-       .long   exception_error         /* 400 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! RTC   ati
-       .long   do_IRQ  !       pri
-       .long   do_IRQ  !       cui
-       .long   exception_error
-       .long   exception_error         /* 500 */
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! WDT   iti     /* 560 */
-       .long   do_IRQ  ! TMU-ch0
-       .long   do_IRQ  ! TMU-ch1
-       .long   do_IRQ  ! TMU-ch2
-       .long   do_IRQ  ! ticpi2        /* 5E0 */
-       .long   do_IRQ  ! 32 Hitachi UDI        /* 600 */
-       .long   exception_error
-       .long   do_IRQ  ! 34 DMAC dmte0
-       .long   do_IRQ  ! 35      dmte1
-       .long   do_IRQ  ! 36      dmte2
-       .long   do_IRQ  ! 37      dmte3
-       .long   do_IRQ  ! 38      dmae
-       .long   exception_error                 ! 39    /* 6E0 */
-       .long   do_IRQ  ! 40 SCIF-ch0 eri               /* 700 */
-       .long   do_IRQ  ! 41          rxi
-       .long   do_IRQ  ! 42          bri
-       .long   do_IRQ  ! 43          txi
-       .long   do_IRQ  ! 44 DMAC dmte4         /* 780 */
-       .long   do_IRQ  ! 45      dmte5
-       .long   do_IRQ  ! 46      dmte6
-       .long   do_IRQ  ! 47      dmte7         /* 7E0 */
-#if defined(CONFIG_SH_FPU)
-       .long   do_fpu_state_restore    ! 48    /* 800 */
-       .long   do_fpu_state_restore    ! 49    /* 820 */
-#else
-       .long   exception_error
-       .long   exception_error
-#endif
-       .long   exception_error                 /* 840 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! 56 CMT        /* 900 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! 60 HAC
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! PCI serr      /* A00 */
-       .long   do_IRQ  !     INTA
-       .long   do_IRQ  !     INTB
-       .long   do_IRQ  !     INTC
-       .long   do_IRQ  !     INTD
-       .long   do_IRQ  !     err
-       .long   do_IRQ  !     pwd3
-       .long   do_IRQ  !     pwd2
-       .long   do_IRQ  !     pwd1      /* B00 */
-       .long   do_IRQ  !     pwd0
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! SCIF-ch1 eri  /* B80 */
-       .long   do_IRQ  !          rxi
-       .long   do_IRQ  !          bri
-       .long   do_IRQ  !          txi
-       .long   do_IRQ  ! SIOF          /* C00 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! HSPI          /* C80 */
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! MMCIF fatat   /* D00 */
-       .long   do_IRQ  !       tran
-       .long   do_IRQ  !       err
-       .long   do_IRQ  !       frdy
-       .long   do_IRQ  ! DMAC dmint8   /* D80 */
-       .long   do_IRQ  !      dmint9
-       .long   do_IRQ  !      dmint10
-       .long   do_IRQ  !      dmint11
-       .long   do_IRQ  ! TMU-ch3       /* E00 */
-       .long   do_IRQ  ! TMU-ch4
-       .long   do_IRQ  ! TMU-ch5
-       .long   exception_error
-       .long   do_IRQ  ! SSI
-       .long   exception_error
-       .long   exception_error
-       .long   exception_error
-       .long   do_IRQ  ! FLCTL flste   /* F00 */
-       .long   do_IRQ  !       fltend
-       .long   do_IRQ  !       fltrq0
-       .long   do_IRQ  !       fltrq1
-       .long   do_IRQ  ! GPIO gpioi0   /* F80 */
-       .long   do_IRQ  !      gpioi1
-       .long   do_IRQ  !      gpioi2
-       .long   do_IRQ  !      gpioi3
-#endif
-
index 97c571fbcdf13a5e2922867c143bf55dcb432292..39aaefb2d83f427d58a0b05ad48a45868dfba2e2 100644 (file)
@@ -1,9 +1,8 @@
-/* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $
- *
+/*
  *  linux/arch/sh/entry.S
  *
  *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- *  Copyright (C) 2003  Paul Mundt
+ *  Copyright (C) 2003 - 2006  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -78,7 +77,6 @@ OFF_TRA       =  (16*4+6*4)
 #define k3     r3
 #define k4     r4
 
-#define k_ex_code      r2_bank /* r2_bank1 */
 #define g_imask                r6      /* r6_bank1 */
 #define k_g_imask      r6_bank /* r6_bank1 */
 #define current                r7      /* r7_bank1 */
@@ -691,7 +689,7 @@ interrupt:
 0:
 #endif /* defined(CONFIG_KGDB_NMI) */
        bra     handle_exception
-        mov.l  @k2, k2
+        mov    #-1, k2         ! interrupt exception marker
 
        .align  2
 1:     .long   EXPEVT
@@ -717,8 +715,7 @@ ENTRY(handle_exception)
        add     current, k1
        mov     k1, r15         ! change to kernel stack
        !
-1:     mov     #-1, k4
-       mov.l   2f, k1
+1:     mov.l   2f, k1
        !
 #ifdef CONFIG_SH_DSP
        mov.l   r2, @-r15               ! Save r2, we need another reg
@@ -763,6 +760,8 @@ skip_save:
 #endif
        ! Save the user registers on the stack.
        mov.l   k2, @-r15       ! EXPEVT
+
+       mov     #-1, k4
        mov.l   k4, @-r15       ! set TRA (default: -1)
        !
        sts.l   macl, @-r15
@@ -797,8 +796,21 @@ skip_save:
        mov.l   r2, @-r15
        mov.l   r1, @-r15
        mov.l   r0, @-r15
-       ! Then, dispatch to the handler, according to the exception code.
-       stc     k_ex_code, r8
+
+       /*
+        * This gets a bit tricky: in the INTEVT case we don't want to use
+        * the VBR offset as a destination in the jump call table, since all
+        * of the destinations are the same. In this case, (interrupt) sets
+        * a marker in r2 (now r2_bank since SR.RB changed), which we check
+        * to determine the exception type. For all other exceptions, we
+        * forcibly read EXPEVT from memory and fix up the jump address; in
+        * the interrupt exception case we jump to do_IRQ() and defer the
+        * INTEVT read until there. As a bonus, we can also clean up the SR.RB
+        * checks that do_IRQ() was doing.
+        */
+       stc     r2_bank, r8
+       cmp/pz  r8
+       bf      interrupt_exception
        shlr2   r8
        shlr    r8
        mov.l   4f, r9
@@ -806,6 +818,8 @@ skip_save:
        mov.l   @r9, r9
        jmp     @r9
         nop
+       rts
+        nop
 
        .align  2
 1:     .long   0x00001000      ! DSP=1
@@ -813,8 +827,17 @@ skip_save:
 3:     .long   0xcfffffff      ! RB=0, BL=0
 4:     .long   exception_handling_table
 
+interrupt_exception:
+       mov.l   1f, r9
+       jmp     @r9
+        nop
+       rts
+        nop
+
+       .align 2
+1:     .long   do_IRQ
+
        .align  2
 ENTRY(exception_none)
        rts
         nop
-
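
The reworked dispatch is easier to follow in C. Below is a hedged sketch of
the control flow only; the marker check and the jump table come from the
patch, while the C rendering (function and variable names) is illustrative,
not the literal assembly:

	/* Hedged C model of the new handle_exception dispatch. r2_bank
	 * holds either the EXPEVT value or the -1 marker that the
	 * (interrupt) path now sets. */
	typedef void (*exc_handler_t)(void);
	extern exc_handler_t exception_handling_table[];
	extern void do_IRQ_entry(void);	/* stands in for the do_IRQ jump */

	static void dispatch(long r2_bank_value)
	{
		if (r2_bank_value < 0)		/* cmp/pz; bf interrupt_exception */
			do_IRQ_entry();		/* INTEVT is read inside do_IRQ() */
		else
			/* shlr2; shlr: exception code -> byte offset into a
			 * table of 4-byte pointers, i.e. index code >> 5 */
			exception_handling_table[r2_bank_value >> 5]();
	}
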
index c7ebd6aec9514cdb4f16583a94a6570e01262440..acf2602569c4928a5ecd6dc023ede793afd64d2e 100644 (file)
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/io.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/thread_info.h>
 #include <asm/cpu/mmu_context.h>
 
+atomic_t irq_err_count;
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves, it doesn't deserve
@@ -24,6 +27,7 @@
  */
 void ack_bad_irq(unsigned int irq)
 {
+       atomic_inc(&irq_err_count);
        printk("unexpected IRQ trap at vector %02x\n", irq);
 }
 
@@ -47,8 +51,10 @@ int show_interrupts(struct seq_file *p, void *v)
                if (!action)
                        goto unlock;
                seq_printf(p, "%3d: ",i);
-               seq_printf(p, "%10u ", kstat_irqs(i));
-               seq_printf(p, " %14s", irq_desc[i].chip->typename);
+               for_each_online_cpu(j)
+                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+               seq_printf(p, " %14s", irq_desc[i].chip->name);
+               seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
                seq_printf(p, "  %s", action->name);
 
                for (action=action->next; action; action = action->next)
@@ -56,7 +62,9 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
 unlock:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-       }
+       } else if (i == NR_IRQS)
+               seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
+
        return 0;
 }
 #endif
@@ -78,7 +86,8 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
                      unsigned long r6, unsigned long r7,
                      struct pt_regs regs)
 {
-       int irq = r4;
+       struct pt_regs *old_regs = set_irq_regs(&regs);
+       int irq;
 #ifdef CONFIG_4KSTACKS
        union irq_ctx *curctx, *irqctx;
 #endif
@@ -102,20 +111,9 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
 #endif
 
 #ifdef CONFIG_CPU_HAS_INTEVT
-       __asm__ __volatile__ (
-#ifdef CONFIG_CPU_HAS_SR_RB
-               "stc    r2_bank, %0\n\t"
+       irq = (ctrl_inl(INTEVT) >> 5) - 16;
 #else
-               "mov.l  @%1, %0\n\t"
-#endif
-               "shlr2  %0\n\t"
-               "shlr2  %0\n\t"
-               "shlr   %0\n\t"
-               "add    #-16, %0\n\t"
-               : "=z" (irq), "=r" (r4)
-               : "1" (INTEVT)
-               : "memory"
-       );
+       irq = r4;
 #endif
 
        irq = irq_demux(irq);
@@ -139,25 +137,25 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
 
                __asm__ __volatile__ (
                        "mov    %0, r4          \n"
-                       "mov    %1, r5          \n"
                        "mov    r15, r9         \n"
-                       "jsr    @%2             \n"
+                       "jsr    @%1             \n"
                        /* switch to the irq stack */
-                       " mov   %3, r15         \n"
+                       " mov   %2, r15         \n"
                        /* restore the stack (ring zero) */
                        "mov    r9, r15         \n"
                        : /* no outputs */
-                       : "r" (irq), "r" (&regs), "r" (__do_IRQ), "r" (isp)
+                       : "r" (irq), "r" (generic_handle_irq), "r" (isp)
                        /* XXX: A somewhat excessive clobber list? -PFM */
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "t", "pr"
                );
        } else
 #endif
-               __do_IRQ(irq, &regs);
+               generic_handle_irq(irq);
 
        irq_exit();
 
+       set_irq_regs(old_regs);
        return 1;
 }
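
The INTEVT-to-IRQ conversion that replaces the deleted inline assembly is
plain arithmetic, and can be sanity-checked against the vector tables removed
earlier in this series (the 0x600 value below is taken from those table
comments):

	/* INTEVT 0x600 is the first external IRQ slot ("! 32 IRQ irq0"):
	 *   (0x600 >> 5) - 16 = 48 - 16 = 32
	 * matching the old shlr2/shlr2/shlr (>> 5) + add #-16 sequence. */
	irq = (ctrl_inl(INTEVT) >> 5) - 16;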
 
index 0b1d5dd7a93b4237c70f3681d5745cac2296e3bd..91516dca4a857180dacb51bd37b68c72ab3630e4 100644 (file)
@@ -5,6 +5,7 @@
  *  Copyright (C) 1995  Linus Torvalds
  *
  *  SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
+ *                  Copyright (C) 2006  Lineo Solutions Inc. (SH4A UBC support)
  */
 
 /*
@@ -290,6 +291,24 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 static void
 ubc_set_tracing(int asid, unsigned long pc)
 {
+#if defined(CONFIG_CPU_SH4A)
+       unsigned long val;
+
+       val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
+       val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
+
+       ctrl_outl(val, UBC_CBR0);
+       ctrl_outl(pc,  UBC_CAR0);
+       ctrl_outl(0x0, UBC_CAMR0);
+       ctrl_outl(0x0, UBC_CBCR);
+
+       val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
+       ctrl_outl(val, UBC_CRR0);
+
+       /* Read back the UBC register we wrote last, to check that the UBC registers have been updated */
+       val = ctrl_inl(UBC_CRR0);
+
+#else  /* CONFIG_CPU_SH4A */
        ctrl_outl(pc, UBC_BARA);
 
 #ifdef CONFIG_MMU
@@ -307,6 +326,7 @@ ubc_set_tracing(int asid, unsigned long pc)
                ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
                ctrl_outw(BRCR_PCBA, UBC_BRCR);
        }
+#endif /* CONFIG_CPU_SH4A */
 }
 
 /*
@@ -359,8 +379,13 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
 #endif
                ubc_set_tracing(asid, next->thread.ubc_pc);
        } else {
+#if defined(CONFIG_CPU_SH4A)
+               ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
+               ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
+#else
                ctrl_outw(0, UBC_BBRA);
                ctrl_outw(0, UBC_BBRB);
+#endif
        }
 
        return prev;
@@ -460,8 +485,13 @@ asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
                                 struct pt_regs regs)
 {
        /* Clear tracing.  */
+#if defined(CONFIG_CPU_SH4A)
+       ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
+       ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
+#else
        ctrl_outw(0, UBC_BBRA);
        ctrl_outw(0, UBC_BBRB);
+#endif
        current->thread.ubc_pc = 0;
        ubc_usercnt -= 1;
 
index 450c68f1df052f5ccba2eaef9d0149a1bc2390bf..57e708d7b52df705cdf6ebcf48d1608485dc2abd 100644 (file)
@@ -47,6 +47,7 @@ unsigned long long __attribute__ ((weak)) sched_clock(void)
        return (unsigned long long)jiffies * (1000000000 / HZ);
 }
 
+#ifndef CONFIG_GENERIC_TIME
 void do_gettimeofday(struct timeval *tv)
 {
        unsigned long seq;
@@ -99,6 +100,7 @@ int do_settimeofday(struct timespec *tv)
        return 0;
 }
 EXPORT_SYMBOL(do_settimeofday);
+#endif /* !CONFIG_GENERIC_TIME */
 
 /* last time the RTC clock got updated */
 static long last_rtc_update;
@@ -107,13 +109,14 @@ static long last_rtc_update;
  * handle_timer_tick() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
  */
-void handle_timer_tick(struct pt_regs *regs)
+void handle_timer_tick(void)
 {
        do_timer(1);
 #ifndef CONFIG_SMP
-       update_process_times(user_mode(regs));
+       update_process_times(user_mode(get_irq_regs()));
 #endif
-       profile_tick(CPU_PROFILING, regs);
+       if (current->pid)
+               profile_tick(CPU_PROFILING);
 
 #ifdef CONFIG_HEARTBEAT
        if (sh_mv.mv_heartbeat != NULL)
index 205816fcf0da50d7fecdeecd5975c2fbaf6c0a9e..24927015dc31fc901153935e91eca80e32332c67 100644 (file)
@@ -80,8 +80,7 @@ static unsigned long tmu_timer_get_offset(void)
        return count;
 }
 
-static irqreturn_t tmu_timer_interrupt(int irq, void *dev_id,
-                                      struct pt_regs *regs)
+static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 {
        unsigned long timer_status;
 
@@ -98,7 +97,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dev_id,
         * locally disabled. -arca
         */
        write_seqlock(&xtime_lock);
-       handle_timer_tick(regs);
+       handle_timer_tick();
        write_sequnlock(&xtime_lock);
 
        return IRQ_HANDLED;
@@ -111,60 +110,6 @@ static struct irqaction tmu_irq = {
        .mask           = CPU_MASK_NONE,
 };
 
-/*
- * Hah!  We'll see if this works (switching from usecs to nsecs).
- */
-static unsigned long tmu_timer_get_frequency(void)
-{
-       u32 freq;
-       struct timespec ts1, ts2;
-       unsigned long diff_nsec;
-       unsigned long factor;
-
-       /* Setup the timer:  We don't want to generate interrupts, just
-        * have it count down at its natural rate.
-        */
-       ctrl_outb(0, TMU_TSTR);
-#if !defined(CONFIG_CPU_SUBTYPE_SH7300) && !defined(CONFIG_CPU_SUBTYPE_SH7760)
-       ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
-#endif
-       ctrl_outw(TMU0_TCR_CALIB, TMU0_TCR);
-       ctrl_outl(0xffffffff, TMU0_TCOR);
-       ctrl_outl(0xffffffff, TMU0_TCNT);
-
-       rtc_sh_get_time(&ts2);
-
-       do {
-               rtc_sh_get_time(&ts1);
-       } while (ts1.tv_nsec == ts2.tv_nsec && ts1.tv_sec == ts2.tv_sec);
-
-       /* actually start the timer */
-       ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
-
-       do {
-               rtc_sh_get_time(&ts2);
-       } while (ts1.tv_nsec == ts2.tv_nsec && ts1.tv_sec == ts2.tv_sec);
-
-       freq = 0xffffffff - ctrl_inl(TMU0_TCNT);
-       if (ts2.tv_nsec < ts1.tv_nsec) {
-               ts2.tv_nsec += 1000000000;
-               ts2.tv_sec--;
-       }
-
-       diff_nsec = (ts2.tv_sec - ts1.tv_sec) * 1000000000 + (ts2.tv_nsec - ts1.tv_nsec);
-
-       /* this should work well if the RTC has a precision of n Hz, where
-        * n is an integer.  I don't think we have to worry about the other
-        * cases. */
-       factor = (1000000000 + diff_nsec/2) / diff_nsec;
-
-       if (factor * diff_nsec > 1100000000 ||
-           factor * diff_nsec <  900000000)
-               panic("weird RTC (diff_nsec %ld)", diff_nsec);
-
-       return freq * factor;
-}
-
 static void tmu_clk_init(struct clk *clk)
 {
        u8 divisor = TMU0_TCR_INIT & 0x7;
@@ -232,12 +177,12 @@ struct sys_timer_ops tmu_timer_ops = {
        .init           = tmu_timer_init,
        .start          = tmu_timer_start,
        .stop           = tmu_timer_stop,
-       .get_frequency  = tmu_timer_get_frequency,
+#ifndef CONFIG_GENERIC_TIME
        .get_offset     = tmu_timer_get_offset,
+#endif
 };
 
 struct sys_timer tmu_timer = {
        .name   = "tmu",
        .ops    = &tmu_timer_ops,
 };
-
index c81e6b67ad300e9e8b23c5adfc1658184d1ea59e..38c82d890ffda6a5cb332ad3b387bde0476dc551 100644 (file)
@@ -28,6 +28,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
        split_page(page, order);
 
        ret = page_address(page);
+       memset(ret, 0, size);
        *handle = virt_to_phys(ret);
 
        /*
index b4e50ae323bf1a2587e830625240eb5655872e53..207f1b6eef5317baee833ff636488ed6fcbd36bb 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/pcic.h>
 #include <asm/timer.h>
 #include <asm/uaccess.h>
+#include <asm/irq_regs.h>
 
 
 unsigned int pcic_pin_to_irq(unsigned int pin, char *name);
index 0251cab4708bbcf31a23f93e6d942e08f54bfc9b..f5ee1ac834bcf38c58de7f799d5054e734507fa1 100644 (file)
@@ -121,16 +121,6 @@ static struct console prom_debug_console = {
        .index =        -1,
 };
 
-int obp_system_intr(void)
-{
-       if (boot_flags & BOOTME_DEBUG) {
-               printk("OBP: system interrupted\n");
-               prom_halt();
-               return 1;
-       }
-       return 0;
-}
-
 /* 
  * Process kernel command line switches that are specific to the
  * SPARC or that require special low-level processing.
index 346c19a949fd0245c2ca29fe3d9f0f72e7a2219f..1dd78c84888a3e1556645537db9c3f549d62f92d 100644 (file)
@@ -36,11 +36,11 @@ SECTIONS
 
   . = ALIGN(4096);
   __init_begin = .;
+  _sinittext = .;
   .init.text : { 
-       _sinittext = .;
        *(.init.text)
-       _einittext = .;
   }
+  _einittext = .;
   __init_text_end = .;
   .init.data : { *(.init.data) }
   . = ALIGN(16);
index b27a506309eed9608e0154be1398f0d257df2564..0df7121cef07b768d8fe0090609f994edcca3ac5 100644 (file)
@@ -402,7 +402,7 @@ void srmmu_nocache_calcsize(void)
        srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
 }
 
-void srmmu_nocache_init(void)
+void __init srmmu_nocache_init(void)
 {
        unsigned int bitmap_bits;
        pgd_t *pgd;
index 958287448cfe89f1cec527fa93d15c4808bf5495..cc8ad480a204152256f97c8b44e15f3f3144c6b4 100644 (file)
@@ -91,16 +91,6 @@ void kernel_enter_debugger(void)
 {
 }
 
-int obp_system_intr(void)
-{
-       if (boot_flags & BOOTME_DEBUG) {
-               printk("OBP: system interrupted\n");
-               prom_halt();
-               return 1;
-       }
-       return 0;
-}
-
 /* 
  * Process kernel command line switches that are specific to the
  * SPARC or that require special low-level processing.
index d75307589d74c144f39d713fea8749242e36e29f..78fb619bdb732fcf2bd6de5e99894840af5a3db8 100644 (file)
@@ -25,6 +25,19 @@ config PCI
 config PCMCIA
        bool
 
+# Yet to do!
+config TRACE_IRQFLAGS_SUPPORT
+       bool
+       default n
+
+config LOCKDEP_SUPPORT
+       bool
+       default y
+
+config STACKTRACE_SUPPORT
+       bool
+       default y
+
 config GENERIC_CALIBRATE_DELAY
        bool
        default y
@@ -37,13 +50,15 @@ config IRQ_RELEASE_METHOD
 menu "UML-specific options"
 
 config MODE_TT
-       bool "Tracing thread support"
+       bool "Tracing thread support (DEPRECATED)"
        default n
        help
        This option controls whether tracing thread support is compiled
-       into UML.  This option is largely obsolete, given that skas0 provides
+       into UML. This option is largely obsolete, given that skas0 provides
        skas security and performance without needing to patch the host.
-       It is safe to say 'N' here.
+       It is safe to say 'N' here; saying 'Y' may cause additional problems
+       with the resulting binary even if you run UML in SKAS mode, and running
+       in TT mode is strongly *NOT RECOMMENDED*.
 
 config STATIC_LINK
        bool "Force a static link"
@@ -56,6 +71,9 @@ config STATIC_LINK
        for use in a chroot jail.  So, if you intend to run UML inside a
        chroot, and you disable CONFIG_MODE_TT, you probably want to say Y
        here.
+       Additionally, this option allows UML to use higher memory spaces (up
+       to 2.75G); for best results, disable CONFIG_MODE_TT and enable this
+       option.
 
 config KERNEL_HALF_GIGS
        int "Kernel address space size (in .5G units)"
@@ -72,10 +90,13 @@ config MODE_SKAS
        default y
        help
        This option controls whether skas (separate kernel address space)
-       support is compiled in.  If you have applied the skas patch to the
-       host, then you certainly want to say Y here (and consider saying N
-       to CONFIG_MODE_TT).  Otherwise, it is safe to say Y.  Disabling this
-       option will shrink the UML binary slightly.
+       support is compiled in.
+       Unless you specifically need TT mode (which matters almost only to
+       developers), you should say Y here.
+       SKAS mode will use the SKAS3 patch if it is applied on the host (and
+       your UML will run in SKAS3 mode); if no SKAS patch is applied on the
+       host, it will run in SKAS0 mode, which is still faster than TT mode.
 
 source "arch/um/Kconfig.arch"
 source "mm/Kconfig"
index f6eb72d117b9e182d059fc7013a3bc1b596ebaaa..f191a550a079cc0b8340f69a67dffa5e33dd85a6 100644 (file)
@@ -16,23 +16,42 @@ config SEMAPHORE_SLEEPERS
        bool
        default y
 
-config HOST_2G_2G
-       bool "2G/2G host address space split"
-       default n
-       help
-       This is needed when the host on which you run has a 2G/2G memory
-       split, instead of the customary 3G/1G.
-
-       Note that to enable such a host
-       configuration, which makes sense only in some cases, you need special
-       host patches.
-
-       So, if you do not know what to do here, say 'N'.
+choice
+       prompt "Host memory split"
+       default HOST_VMSPLIT_3G
+       ---help---
+          This is needed when the host kernel on which you run has a non-default
+          (like 2G/2G) memory split, instead of the customary 3G/1G. If you did
+          not recompile your own kernel but use your distribution's default one,
+          you can safely accept the "Default split" option.
+
+          It can be enabled on recent (>=2.6.16-rc2) vanilla kernels via
+          CONFIG_VMSPLIT_*, or on previous kernels with special patches (the -ck
+          patchset by Con Kolivas, among others) - the option names closely
+          match the host CONFIG_VMSPLIT_* ones.
+
+          A lower setting (where 1G/3G is lowest and 3G/1G is highest) will
+          tolerate more "normal" host kernels, but a higher setting will be
+          stricter.
+
+          So, if you do not know what to do here, say 'Default split'.
+
+       config HOST_VMSPLIT_3G
+               bool "Default split (3G/1G user/kernel host split)"
+       config HOST_VMSPLIT_3G_OPT
+               bool "3G/1G user/kernel host split (for full 1G low memory)"
+       config HOST_VMSPLIT_2G
+               bool "2G/2G user/kernel host split"
+       config HOST_VMSPLIT_1G
+               bool "1G/3G user/kernel host split"
+endchoice
 
 config TOP_ADDR
-       hex
-       default 0xc0000000 if !HOST_2G_2G
-       default 0x80000000 if HOST_2G_2G
+       hex
+       default 0xB0000000 if HOST_VMSPLIT_3G_OPT
+       default 0x78000000 if HOST_VMSPLIT_2G
+       default 0x40000000 if HOST_VMSPLIT_1G
+       default 0xC0000000
 
 config 3_LEVEL_PGTABLES
        bool "Three-level pagetables (EXPERIMENTAL)"
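
The new TOP_ADDR defaults line up with the "up to 2.75G" figure quoted in the
STATIC_LINK help above: 0xB0000000 = 2.75G for the 3G_OPT split, 0x78000000 =
1.875G for the 2G/2G split, 0x40000000 = 1G for the 1G/3G split, and
0xC0000000 = 3G for the default 3G/1G split.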
index 11154b6773ec230302341d8889e79601677b47c1..d278682dd799dc51e47cad1341c62f34bae58e19 100644 (file)
@@ -1,10 +1,10 @@
 # Copyright 2003 - 2004 Pathscale, Inc
 # Released under the GPL
 
-core-y += arch/um/sys-x86_64/
+core-y += arch/um/sys-x86_64/ arch/x86_64/crypto/
 START := 0x60000000
 
-_extra_flags_ = -fno-builtin -m64 -mcmodel=kernel
+_extra_flags_ = -fno-builtin -m64
 
 #We #undef __x86_64__ for kernelspace, not for userspace where
 #it's needed for headers to work!
index 356390d1f8b945dbedbb3ce11475fdb309b95926..461175f8b1d9da4b4c14f21616ea4ec2016e1409 100644 (file)
@@ -1,9 +1,16 @@
 /* for use by sys-$SUBARCH/kernel-offsets.c */
 
+DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
+#ifdef CONFIG_MODE_TT
+OFFSET(HOST_TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid);
+#endif
+
 OFFSET(HOST_TASK_REGS, task_struct, thread.regs);
 OFFSET(HOST_TASK_PID, task_struct, pid);
+
 DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE);
 DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
+
 DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
 DEFINE_STR(UM_KERN_ALERT, KERN_ALERT);
 DEFINE_STR(UM_KERN_CRIT, KERN_CRIT);
@@ -12,6 +19,10 @@ DEFINE_STR(UM_KERN_WARNING, KERN_WARNING);
 DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE);
 DEFINE_STR(UM_KERN_INFO, KERN_INFO);
 DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG);
+
 DEFINE(UM_ELF_CLASS, ELF_CLASS);
 DEFINE(UM_ELFCLASS32, ELFCLASS32);
 DEFINE(UM_ELFCLASS64, ELFCLASS64);
+
+/* For crypto assembler code. */
+DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
index 59cfa9e0cad034739c3add89720269c01f0177c2..cec9fcc57bf51f5169cbee6cf7fdc6f60555d5f8 100644 (file)
@@ -6,7 +6,6 @@
 #ifndef __KERN_UTIL_H__
 #define __KERN_UTIL_H__
 
-#include "linux/threads.h"
 #include "sysdep/ptrace.h"
 #include "sysdep/faultinfo.h"
 
index e93c6d3e893b05c3ca17a9ce78ad91334f2d55cd..e860bc5848e0aaff10b382f185aed8b5a4acd81e 100644 (file)
@@ -12,7 +12,8 @@ extern void longjmp(jmp_buf, int);
 } while(0)
 
 #define UML_SETJMP(buf) ({ \
-       int n, enable;     \
+       int n;     \
+       volatile int enable;    \
        enable = get_signals(); \
        n = setjmp(*buf); \
        if(n != 0) \
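
Making enable volatile here follows the C rule that automatic variables
modified between setjmp() and the corresponding longjmp() have indeterminate
values afterwards unless volatile-qualified. A minimal standalone illustration
of the same pattern (hypothetical code, not part of the patch):

	#include <setjmp.h>

	static jmp_buf buf;

	static int demo(void)
	{
		volatile int enable = 0;  /* volatile: changed after setjmp() */

		if (setjmp(buf) != 0)
			return enable;    /* well-defined only due to volatile */
		enable = 1;
		longjmp(buf, 1);          /* back to setjmp(); enable keeps 1 */
	}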
index 120ca21a513a03853a38abf2004a35a1d0b4b173..6516f6dca96d35b036a7632cc668418e44468bf2 100644 (file)
@@ -201,6 +201,7 @@ extern int os_getpgrp(void);
 
 #ifdef UML_CONFIG_MODE_TT
 extern void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int));
+extern void stop(void);
 #endif
 extern void init_new_thread_signals(void);
 extern int run_kernel_thread(int (*fn)(void *), void *arg, void **jmp_ptr);
index 2c13de321f2fb330eb5dc162fe53bffca57f3bad..97ec9d894d7539a7ef61c08b4196b01b547d6e91 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/elf.h>
+#include <linux/crypto.h>
 #include <asm/mman.h>
 
 #define DEFINE(sym, val) \
@@ -17,9 +18,5 @@
 void foo(void)
 {
        OFFSET(HOST_TASK_DEBUGREGS, task_struct, thread.arch.debugregs);
-       DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
-#ifdef CONFIG_MODE_TT
-       OFFSET(HOST_TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid);
-#endif
 #include <common-offsets.h>
 }
index 91d129fb39308cc57771ca71d2465ecc1a07553d..a307237b7964394189e220673c160bddf08a6227 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/elf.h>
+#include <linux/crypto.h>
 #include <asm/page.h>
 #include <asm/mman.h>
 
@@ -18,9 +19,5 @@
 
 void foo(void)
 {
-       DEFINE(KERNEL_MADV_REMOVE, MADV_REMOVE);
-#ifdef CONFIG_MODE_TT
-       OFFSET(HOST_TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid);
-#endif
 #include <common-offsets.h>
 }
index c17eddcf89b3ce8ab627cbdad0f2574cdd3035cb..2c6d090a2e872b11d891c2510a1e4f55ef52dd17 100644 (file)
@@ -60,10 +60,7 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 #endif
 
        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
-       /* This is wrong for the code page, but it doesn't matter since the
-        * stub is mapped by hand with the correct permissions.
-        */
-       *pte = pte_mkwrite(*pte);
+       *pte = pte_mkread(*pte);
        return(0);
 
  out_pmd:
index 6c92bbccb49c30658295c9ea226258ce62c3b5af..ed1abcf4d0576584e78e9df965f803ef5030c054 100644 (file)
@@ -4,13 +4,13 @@
  * Licensed under the GPL
  */
 
-#include <setjmp.h>
 #include <string.h>
 #include "user_util.h"
 #include "uml_uaccess.h"
 #include "task.h"
 #include "kern_util.h"
 #include "os.h"
+#include "longjmp.h"
 
 int __do_copy_from_user(void *to, const void *from, int n,
                        void **fault_addr, void **fault_catcher)
@@ -80,10 +80,10 @@ int __do_strnlen_user(const char *str, unsigned long n,
        struct tt_regs save = TASK_REGS(get_current())->tt;
        int ret;
        unsigned long *faddrp = (unsigned long *)fault_addr;
-       sigjmp_buf jbuf;
+       jmp_buf jbuf;
 
        *fault_catcher = &jbuf;
-       if(sigsetjmp(jbuf, 1) == 0)
+       if(UML_SETJMP(&jbuf) == 0)
                ret = strlen(str) + 1;
        else ret = *faddrp - (unsigned long) str;
 
index 5461a065bbb924104dbb395538a6675cbfd9f4e1..3dc3a02d626318ba0337c8471237387099df5729 100644 (file)
@@ -10,7 +10,6 @@
 #include <errno.h>
 #include <stdarg.h>
 #include <stdlib.h>
-#include <setjmp.h>
 #include <sys/time.h>
 #include <sys/ptrace.h>
 #include <linux/ptrace.h>
index 3f5b1514e8a71a33513ccf613643fcb2e33fed5c..56b8a50e8bc2e08c3bfa5b7661b954599f5c6cee 100644 (file)
@@ -80,11 +80,18 @@ void setup_machinename(char *machine_out)
        struct utsname host;
 
        uname(&host);
-#if defined(UML_CONFIG_UML_X86) && !defined(UML_CONFIG_64BIT)
+#ifdef UML_CONFIG_UML_X86
+# ifndef UML_CONFIG_64BIT
        if (!strcmp(host.machine, "x86_64")) {
                strcpy(machine_out, "i686");
                return;
        }
+# else
+       if (!strcmp(host.machine, "i686")) {
+               strcpy(machine_out, "x86_64");
+               return;
+       }
+# endif
 #endif
        strcpy(machine_out, host.machine);
 }
index 8592738082038b0b8d500408fcc0e616793e457e..12c593607c5943666b51d7c71e392299823aec48 100644 (file)
@@ -14,6 +14,3 @@ EXPORT_SYMBOL(__up_wakeup);
 
 /*XXX: we need them because they would be exported by x86_64 */
 EXPORT_SYMBOL(__memcpy);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(ip_compute_csum);
index 1c967026c957dd08e4b81406a82712b60ada8ee6..652fa34c2cd3d3f0a3f7a8074cfb4ca890e56488 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <stddef.h>
 #include <signal.h>
-#include <linux/compiler.h>
 #include <asm/unistd.h>
 #include "uml-config.h"
 #include "sysdep/sigcontext.h"
index 0612a33bb896bb818001d3229c6af6652332c1df..c4ef801b765b8004033c83d2f6b1ac94b822fb9f 100644 (file)
@@ -178,7 +178,8 @@ void make_8259A_irq(unsigned int irq)
 {
        disable_irq_nosync(irq);
        io_apic_irqs &= ~(1<<irq);
-       set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+       set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
+                                     "XT");
        enable_irq(irq);
 }
 
@@ -431,8 +432,8 @@ void __init init_ISA_irqs (void)
                        /*
                         * 16 old-style INTA-cycle interrupts:
                         */
-                       set_irq_chip_and_handler(i, &i8259A_chip,
-                                                handle_level_irq);
+                       set_irq_chip_and_handler_name(i, &i8259A_chip,
+                                                     handle_level_irq, "XT");
                } else {
                        /*
                         * 'high' PCI IRQs filled in on demand
index 771bcf77daf2104f830f9434712efe11a7e45868..49e94f7994c54f29fcc067533ab41563213b00bb 100644 (file)
@@ -660,7 +660,7 @@ next:
                }
                if (old_vector >= 0) {
                        int old_cpu;
-                       for_each_cpu_mask(old_cpu, domain)
+                       for_each_cpu_mask(old_cpu, irq_domain[irq])
                                per_cpu(vector_irq, old_cpu)[old_vector] = -1;
                }
                for_each_cpu_mask(new_cpu, domain)
@@ -696,11 +696,11 @@ static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
 {
        if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
                        trigger == IOAPIC_LEVEL)
-               set_irq_chip_and_handler(irq, &ioapic_chip,
-                                        handle_fasteoi_irq);
+               set_irq_chip_and_handler_name(irq, &ioapic_chip,
+                                             handle_fasteoi_irq, "fasteoi");
        else
-               set_irq_chip_and_handler(irq, &ioapic_chip,
-                                        handle_edge_irq);
+               set_irq_chip_and_handler_name(irq, &ioapic_chip,
+                                             handle_edge_irq, "edge");
 }
 
 static void __init setup_IO_APIC_irqs(void)
@@ -806,7 +806,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
         * The timer IRQ doesn't have to know that behind the
         * scene we have a 8259A-master in AEOI mode ...
         */
-       set_irq_chip_and_handler(0, &ioapic_chip, handle_edge_irq);
+       set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
 
        /*
         * Add it to the IO-APIC irq-routing table:
@@ -1839,7 +1839,7 @@ int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
 
        write_msi_msg(irq, &msg);
 
-       set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
+       set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
 
        return 0;
 }
@@ -1897,7 +1897,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
 }
 #endif
 
-static struct hw_interrupt_type ht_irq_chip = {
+static struct irq_chip ht_irq_chip = {
        .name           = "PCI-HT",
        .mask           = mask_ht_irq,
        .unmask         = unmask_ht_irq,
@@ -1936,7 +1936,8 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
                write_ht_irq_low(irq, low);
                write_ht_irq_high(irq, high);
 
-               set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
+               set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+                                             handle_edge_irq, "edge");
        }
        return vector;
 }
index dff68eb2b7878dae6279338035d72d2ec61e4884..e46c55856d40ae6fb85daf785ca438dccba1b042 100644 (file)
@@ -75,7 +75,7 @@ int show_interrupts(struct seq_file *p, void *v)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
-               seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
+               seq_printf(p, "-%-8s", irq_desc[i].name);
 
                seq_printf(p, "  %s", action->name);
                for (action=action->next; action; action = action->next)
index 5e95b257ee26208612b8f28229612e8ce6796660..49f7fac6229e5f4e920d58fcdec1fd5e49e8667b 100644 (file)
@@ -238,20 +238,28 @@ void cpu_idle (void)
  * We execute MONITOR against need_resched and enter optimized wait state
  * through MWAIT. Whenever someone changes need_resched, we would be woken
  * up from MWAIT (without an IPI).
+ *
+ * Starting with Core Duo processors, MWAIT can also take hints based on
+ * CPU capability.
  */
-static void mwait_idle(void)
+void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
-       local_irq_enable();
-
-       while (!need_resched()) {
+       if (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
-               if (need_resched())
-                       break;
-               __mwait(0, 0);
+               if (!need_resched())
+                       __mwait(eax, ecx);
        }
 }
 
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+       local_irq_enable();
+       while (!need_resched())
+               mwait_idle_with_hints(0, 0);
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
        static int printed;
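
Factoring the MONITOR/MWAIT pair out into mwait_idle_with_hints() lets other
idle paths request deeper C-states through the EAX/ECX hint registers. A
hedged sketch of such a caller follows; the 0x10 hint is a hypothetical
C2-class encoding, not a value this patch establishes:

	/* Hypothetical deeper-C-state idle loop. EAX[7:4] selects the target
	 * C-state on MWAIT-extension CPUs; real callers would derive the
	 * hint values from firmware (ACPI) tables. */
	local_irq_enable();
	while (!need_resched())
		mwait_idle_with_hints(0x10, 0);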
index 044e852bd25efb297c10545211d5fe80d5dfbf22..414caf0c5f9ae96b67bafea7589b532fa821b361 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pci_ids.h>
 #include <linux/pci_regs.h>
 #include <asm/pci-direct.h>
+#include <asm/io.h>
 
 static int __init vsmp_init(void)
 {
index 19c72520a86876a616becaf31ab3d72f73e8bb3f..971dc1181e69ace1620338b845a5f4e5869dee30 100644 (file)
@@ -406,9 +406,12 @@ void __cpuinit zap_low_mappings(int cpu)
 #ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
-       unsigned long max_zone_pfns[MAX_NR_ZONES] = {MAX_DMA_PFN,
-                                                       MAX_DMA32_PFN,
-                                                       end_pfn};
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+       max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+       max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+       max_zone_pfns[ZONE_NORMAL] = end_pfn;
+
        memory_present(0, 0, end_pfn);
        sparse_init();
        free_area_init_nodes(max_zone_pfns);
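
Aside: this hunk (and the identical one in the NUMA discontig.c below) replaces a positional array initializer with memset() plus explicit ZONE_* indices, so each limit is tied to its zone by name rather than by position; if a zone is ever added, removed, or reordered in the enum, the positional form would silently shift limits into the wrong slots. A stand-alone illustration (the enum here is illustrative, not the kernel's exact definition):

    #include <stdio.h>
    #include <string.h>

    enum { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE, MAX_NR_ZONES };

    int main(void)
    {
            /* positional: breaks silently if a zone is inserted before NORMAL */
            unsigned long positional[MAX_NR_ZONES] = { 4096, 1048576, 2097152 };

            /* indexed: each limit is bound to its zone by name */
            unsigned long indexed[MAX_NR_ZONES];
            memset(indexed, 0, sizeof(indexed));
            indexed[ZONE_DMA]    = 4096;
            indexed[ZONE_DMA32]  = 1048576;
            indexed[ZONE_NORMAL] = 2097152;

            printf("ZONE_NORMAL: positional=%lu indexed=%lu\n",
                   positional[ZONE_NORMAL], indexed[ZONE_NORMAL]);
            return 0;
    }
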
index 829a008bd39b75a95eec6dfb66a46d4497459a48..2ee2e003606cad9cc727042af6ed435dadc45a53 100644 (file)
@@ -338,9 +338,11 @@ static void __init arch_sparse_init(void)
 void __init paging_init(void)
 { 
        int i;
-       unsigned long max_zone_pfns[MAX_NR_ZONES] = { MAX_DMA_PFN,
-               MAX_DMA32_PFN,
-               end_pfn};
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+       max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+       max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+       max_zone_pfns[ZONE_NORMAL] = end_pfn;
 
        arch_sparse_init();
 
index 487dd3da8853971d9bcb77cf6ecf51eb39faedf3..8ccd163254b8acf1469628e432308ab73cedd97b 100644 (file)
@@ -93,21 +93,18 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
 static struct elevator_type *elevator_find(const char *name)
 {
-       struct elevator_type *e = NULL;
+       struct elevator_type *e;
        struct list_head *entry;
 
        list_for_each(entry, &elv_list) {
-               struct elevator_type *__e;
 
-               __e = list_entry(entry, struct elevator_type, list);
+               e = list_entry(entry, struct elevator_type, list);
 
-               if (!strcmp(__e->elevator_name, name)) {
-                       e = __e;
-                       break;
-               }
+               if (!strcmp(e->elevator_name, name))
+                       return e;
        }
 
-       return e;
+       return NULL;
 }
 
 static void elevator_put(struct elevator_type *e)
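
Aside: elevator_find() is reduced to the idiomatic early-return search, dropping the __e temporary, the result variable, and the break. The same shape in stand-alone C (types invented for the sketch):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct elv { const char *name; struct elv *next; };

    /* early-return search: return on the first match, NULL otherwise */
    static struct elv *find(struct elv *head, const char *name)
    {
            struct elv *e;

            for (e = head; e; e = e->next)
                    if (!strcmp(e->name, name))
                            return e;

            return NULL;
    }

    int main(void)
    {
            struct elv noop = { "noop", NULL };
            struct elv cfq  = { "cfq", &noop };

            printf("%s\n", find(&cfq, "noop") ? "found" : "missing");
            return 0;
    }
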
@@ -1088,7 +1085,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
        struct list_head *entry;
        int len = 0;
 
-       spin_lock_irq(q->queue_lock);
+       spin_lock_irq(&elv_list_lock);
        list_for_each(entry, &elv_list) {
                struct elevator_type *__e;
 
@@ -1098,7 +1095,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
-       spin_unlock_irq(q->queue_lock);
+       spin_unlock_irq(&elv_list_lock);
 
        len += sprintf(len+name, "\n");
        return len;
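
Aside: elv_list is a global list, so walking it must be serialized by the global elv_list_lock against concurrent registration and unregistration; the per-queue queue_lock protects a different object entirely and gave no such guarantee. A minimal pthread sketch of pairing shared data with its own lock (names invented, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>

    static int shared_list_len;   /* stand-in for the global elv_list */
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void list_walk(void)
    {
            /* take the lock that guards the object actually being read */
            pthread_mutex_lock(&list_lock);
            printf("len=%d\n", shared_list_len);
            pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            pthread_mutex_lock(&list_lock);
            shared_list_len = 3;
            pthread_mutex_unlock(&list_lock);
            list_walk();
            return 0;
    }
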
index 263e86ddc1a4d0740b3e524ced6703d3a991f3d2..f39463418904f0777e105911edfec32b0331dc7a 100644 (file)
@@ -14,6 +14,10 @@ source "drivers/pnp/Kconfig"
 
 source "drivers/block/Kconfig"
 
+# misc before ide - BLK_DEV_SGIIOC4 depends on SGI_IOC4
+
+source "drivers/misc/Kconfig"
+
 source "drivers/ide/Kconfig"
 
 source "drivers/scsi/Kconfig"
@@ -52,8 +56,6 @@ source "drivers/w1/Kconfig"
 
 source "drivers/hwmon/Kconfig"
 
-source "drivers/misc/Kconfig"
-
 source "drivers/mfd/Kconfig"
 
 source "drivers/media/Kconfig"
index e9ee4c52a5f6266a7edc5df20d80aad2647338ea..c7ac9297a20499893da15a4f2c766285310d6f1b 100644 (file)
@@ -138,6 +138,7 @@ struct asus_hotk {
                S2x,            //S200 (J1 reported), Victor MP-XP7210
                W1N,            //W1000N
                W5A,            //W5A
+               W3V,            //W3030V
                xxN,            //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N
                //(Centrino)
                END_MODEL
@@ -376,6 +377,17 @@ static struct model_data model_conf[END_MODEL] = {
         .display_get = "\\ADVG"},
 
        {
+        .name = "W3V",
+        .mt_mled = "MLED",
+        .mt_wled = "WLED",
+        .mt_lcd_switch = xxN_PREFIX "_Q10",
+        .lcd_status = "\\BKLT",
+        .brightness_set = "SPLV",
+        .brightness_get = "GPLV",
+        .display_set = "SDSP",
+        .display_get = "\\INFB"},
+
+       {
         .name = "xxN",
         .mt_mled = "MLED",
 /* WLED present, but not controlled by ACPI */
@@ -555,11 +567,11 @@ static int
 write_led(const char __user * buffer, unsigned long count,
          char *ledname, int ledmask, int invert)
 {
-       int value;
+       int rv, value;
        int led_out = 0;
 
-       count = parse_arg(buffer, count, &value);
-       if (count > 0)
+       rv = parse_arg(buffer, count, &value);
+       if (rv > 0)
                led_out = value ? 1 : 0;
 
        hotk->status =
@@ -572,7 +584,7 @@ write_led(const char __user * buffer, unsigned long count,
                printk(KERN_WARNING "Asus ACPI: LED (%s) write failed\n",
                       ledname);
 
-       return count;
+       return rv;
 }
 
 /*
@@ -607,20 +619,18 @@ static int
 proc_write_ledd(struct file *file, const char __user * buffer,
                unsigned long count, void *data)
 {
-       int value;
+       int rv, value;
 
-       count = parse_arg(buffer, count, &value);
-       if (count > 0) {
+       rv = parse_arg(buffer, count, &value);
+       if (rv > 0) {
                if (!write_acpi_int
                    (hotk->handle, hotk->methods->mt_ledd, value, NULL))
                        printk(KERN_WARNING
                               "Asus ACPI: LED display write failed\n");
                else
                        hotk->ledd_status = (u32) value;
-       } else if (count < 0)
-               printk(KERN_WARNING "Asus ACPI: Error reading user input\n");
-
-       return count;
+       }
+       return rv;
 }
 
 /*
@@ -761,12 +771,12 @@ static int
 proc_write_lcd(struct file *file, const char __user * buffer,
               unsigned long count, void *data)
 {
-       int value;
+       int rv, value;
 
-       count = parse_arg(buffer, count, &value);
-       if (count > 0)
+       rv = parse_arg(buffer, count, &value);
+       if (rv > 0)
                set_lcd_state(value);
-       return count;
+       return rv;
 }
 
 static int read_brightness(void)
@@ -830,18 +840,15 @@ static int
 proc_write_brn(struct file *file, const char __user * buffer,
               unsigned long count, void *data)
 {
-       int value;
+       int rv, value;
 
-       count = parse_arg(buffer, count, &value);
-       if (count > 0) {
+       rv = parse_arg(buffer, count, &value);
+       if (rv > 0) {
                value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
                /* 0 <= value <= 15 */
                set_brightness(value);
-       } else if (count < 0) {
-               printk(KERN_WARNING "Asus ACPI: Error reading user input\n");
        }
-
-       return count;
+       return rv;
 }
 
 static void set_display(int value)
@@ -880,15 +887,12 @@ static int
 proc_write_disp(struct file *file, const char __user * buffer,
                unsigned long count, void *data)
 {
-       int value;
+       int rv, value;
 
-       count = parse_arg(buffer, count, &value);
-       if (count > 0)
+       rv = parse_arg(buffer, count, &value);
+       if (rv > 0)
                set_display(value);
-       else if (count < 0)
-               printk(KERN_WARNING "Asus ACPI: Error reading user input\n");
-
-       return count;
+       return rv;
 }
 
 typedef int (proc_readfunc) (char *page, char **start, off_t off, int count,
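
Aside: every one of these proc_write_* handlers previously stored parse_arg()'s result back into the unsigned long count, so a negative error code became a huge positive value, the "> 0" test took the success path, and the error was also returned as a bogus byte count. Keeping the result in a signed int rv fixes both. A stand-alone demonstration of the pitfall:

    #include <stdio.h>

    #define EFAULT 14

    /* stand-in for parse_arg() failing on a bad user buffer */
    static int parse_arg_stub(void)
    {
            return -EFAULT;
    }

    int main(void)
    {
            unsigned long count = 32;
            int rv;

            count = parse_arg_stub();          /* old code: sign is lost */
            if (count > 0)
                    printf("old test wrongly passes: count=%lu\n", count);

            rv = parse_arg_stub();             /* new code: sign preserved */
            if (rv <= 0)
                    printf("new test catches the error: rv=%d\n", rv);
            return 0;
    }
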
@@ -1097,6 +1101,8 @@ static int asus_model_match(char *model)
                return A4G;
        else if (strncmp(model, "W1N", 3) == 0)
                return W1N;
+       else if (strncmp(model, "W3V", 3) == 0)
+               return W3V;
        else if (strncmp(model, "W5A", 3) == 0)
                return W5A;
        else
@@ -1200,9 +1206,10 @@ static int asus_hotk_get_info(void)
                hotk->methods->mt_wled = NULL;
        /* L5D's WLED is not controlled by ACPI */
        else if (strncmp(string, "M2N", 3) == 0 ||
+                strncmp(string, "W3V", 3) == 0 ||
                 strncmp(string, "S1N", 3) == 0)
                hotk->methods->mt_wled = "WLED";
-       /* M2N and S1N have a usable WLED */
+       /* M2N, S1N and W3V have a usable WLED */
        else if (asus_info) {
                if (strncmp(asus_info->oem_table_id, "L1", 2) == 0)
                        hotk->methods->mled_status = NULL;
index 9810e2a55d0adf3fb77758b20b9b670f3e2f5342..026e40755cdd7b0780c4832aade6a9601fb9db83 100644 (file)
@@ -64,6 +64,7 @@ extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
 
 static int acpi_battery_add(struct acpi_device *device);
 static int acpi_battery_remove(struct acpi_device *device, int type);
+static int acpi_battery_resume(struct acpi_device *device, int status);
 
 static struct acpi_driver acpi_battery_driver = {
        .name = ACPI_BATTERY_DRIVER_NAME,
@@ -71,6 +72,7 @@ static struct acpi_driver acpi_battery_driver = {
        .ids = ACPI_BATTERY_HID,
        .ops = {
                .add = acpi_battery_add,
+               .resume = acpi_battery_resume,
                .remove = acpi_battery_remove,
                },
 };
@@ -753,6 +755,18 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
        return 0;
 }
 
+/* this is needed to learn about changes made in suspended state */
+static int acpi_battery_resume(struct acpi_device *device, int state)
+{
+       struct acpi_battery *battery;
+
+       if (!device)
+               return -EINVAL;
+
+       battery = device->driver_data;
+       return acpi_battery_check(battery);
+}
+
 static int __init acpi_battery_init(void)
 {
        int result;
index e5d7963628543edd9eb6d91042d795df11433e19..e6d4b084dca2eeb40213074998ccdfde52278260 100644 (file)
@@ -45,206 +45,143 @@ ACPI_MODULE_NAME("acpi_ec")
 #define ACPI_EC_DRIVER_NAME            "ACPI Embedded Controller Driver"
 #define ACPI_EC_DEVICE_NAME            "Embedded Controller"
 #define ACPI_EC_FILE_INFO              "info"
+
+/* EC status register */
 #define ACPI_EC_FLAG_OBF       0x01    /* Output buffer full */
 #define ACPI_EC_FLAG_IBF       0x02    /* Input buffer full */
 #define ACPI_EC_FLAG_BURST     0x10    /* burst mode */
 #define ACPI_EC_FLAG_SCI       0x20    /* EC-SCI occurred */
-#define ACPI_EC_EVENT_OBF      0x01    /* Output buffer full */
-#define ACPI_EC_EVENT_IBE      0x02    /* Input buffer empty */
-#define ACPI_EC_DELAY          50      /* Wait 50ms max. during EC ops */
-#define ACPI_EC_UDELAY_GLK     1000    /* Wait 1ms max. to get global lock */
-#define ACPI_EC_UDELAY         100     /* Poll @ 100us increments */
-#define ACPI_EC_UDELAY_COUNT   1000    /* Wait 10ms max. during EC ops */
+
+/* EC commands */
 #define ACPI_EC_COMMAND_READ   0x80
 #define ACPI_EC_COMMAND_WRITE  0x81
 #define ACPI_EC_BURST_ENABLE   0x82
 #define ACPI_EC_BURST_DISABLE  0x83
 #define ACPI_EC_COMMAND_QUERY  0x84
-#define EC_POLL                        0xFF
-#define EC_INTR                        0x00
+
+/* EC events */
+enum {
+       ACPI_EC_EVENT_OBF_1 = 1,        /* Output buffer full */
+       ACPI_EC_EVENT_IBF_0,            /* Input buffer empty */
+};
+
+#define ACPI_EC_DELAY          50      /* Wait 50ms max. during EC ops */
+#define ACPI_EC_UDELAY_GLK     1000    /* Wait 1ms max. to get global lock */
+#define ACPI_EC_UDELAY         100     /* Poll @ 100us increments */
+#define ACPI_EC_UDELAY_COUNT   1000    /* Wait 10ms max. during EC ops */
+
+enum {
+       EC_INTR = 1,    /* Output buffer full */
+       EC_POLL,        /* Input buffer empty */
+};
+
 static int acpi_ec_remove(struct acpi_device *device, int type);
 static int acpi_ec_start(struct acpi_device *device);
 static int acpi_ec_stop(struct acpi_device *device, int type);
-static int acpi_ec_intr_add(struct acpi_device *device);
-static int acpi_ec_poll_add(struct acpi_device *device);
+static int acpi_ec_add(struct acpi_device *device);
 
 static struct acpi_driver acpi_ec_driver = {
        .name = ACPI_EC_DRIVER_NAME,
        .class = ACPI_EC_CLASS,
        .ids = ACPI_EC_HID,
        .ops = {
-               .add = acpi_ec_intr_add,
+               .add = acpi_ec_add,
                .remove = acpi_ec_remove,
                .start = acpi_ec_start,
                .stop = acpi_ec_stop,
                },
 };
-union acpi_ec {
-       struct {
-               u32 mode;
-               acpi_handle handle;
-               unsigned long uid;
-               unsigned long gpe_bit;
-               struct acpi_generic_address status_addr;
-               struct acpi_generic_address command_addr;
-               struct acpi_generic_address data_addr;
-               unsigned long global_lock;
-       } common;
-
-       struct {
-               u32 mode;
-               acpi_handle handle;
-               unsigned long uid;
-               unsigned long gpe_bit;
-               struct acpi_generic_address status_addr;
-               struct acpi_generic_address command_addr;
-               struct acpi_generic_address data_addr;
-               unsigned long global_lock;
-               unsigned int expect_event;
-               atomic_t leaving_burst; /* 0 : No, 1 : Yes, 2: abort */
-               atomic_t pending_gpe;
-               struct semaphore sem;
-               wait_queue_head_t wait;
-       } intr;
-
-       struct {
-               u32 mode;
-               acpi_handle handle;
-               unsigned long uid;
-               unsigned long gpe_bit;
-               struct acpi_generic_address status_addr;
-               struct acpi_generic_address command_addr;
-               struct acpi_generic_address data_addr;
-               unsigned long global_lock;
-               struct semaphore sem;
-       } poll;
-};
 
-static int acpi_ec_poll_wait(union acpi_ec *ec, u8 event);
-static int acpi_ec_intr_wait(union acpi_ec *ec, unsigned int event);
-static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data);
-static int acpi_ec_intr_read(union acpi_ec *ec, u8 address, u32 * data);
-static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data);
-static int acpi_ec_intr_write(union acpi_ec *ec, u8 address, u8 data);
-static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data);
-static int acpi_ec_intr_query(union acpi_ec *ec, u32 * data);
-static void acpi_ec_gpe_poll_query(void *ec_cxt);
-static void acpi_ec_gpe_intr_query(void *ec_cxt);
-static u32 acpi_ec_gpe_poll_handler(void *data);
-static u32 acpi_ec_gpe_intr_handler(void *data);
-static acpi_status __init
-acpi_fake_ecdt_poll_callback(acpi_handle handle,
-                               u32 Level, void *context, void **retval);
-
-static acpi_status __init
-acpi_fake_ecdt_intr_callback(acpi_handle handle,
-                             u32 Level, void *context, void **retval);
-
-static int __init acpi_ec_poll_get_real_ecdt(void);
-static int __init acpi_ec_intr_get_real_ecdt(void);
 /* If we find an EC via the ECDT, we need to keep a ptr to its context */
-static union acpi_ec *ec_ecdt;
+struct acpi_ec {
+       acpi_handle handle;
+       unsigned long uid;
+       unsigned long gpe_bit;
+       unsigned long command_addr;
+       unsigned long data_addr;
+       unsigned long global_lock;
+       struct semaphore sem;
+       unsigned int expect_event;
+       atomic_t leaving_burst; /* 0 : No, 1 : Yes, 2: abort */
+       wait_queue_head_t wait;
+} *ec_ecdt;
 
 /* External interfaces use first EC only, so remember */
 static struct acpi_device *first_ec;
-static int acpi_ec_poll_mode = EC_INTR;
+static int acpi_ec_mode = EC_INTR;
 
 /* --------------------------------------------------------------------------
                              Transaction Management
    -------------------------------------------------------------------------- */
 
-static u32 acpi_ec_read_status(union acpi_ec *ec)
+static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
 {
-       u32 status = 0;
-
-       acpi_hw_low_level_read(8, &status, &ec->common.status_addr);
-       return status;
+       return inb(ec->command_addr);
 }
 
-static int acpi_ec_wait(union acpi_ec *ec, u8 event)
+static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
-       if (acpi_ec_poll_mode)
-               return acpi_ec_poll_wait(ec, event);
-       else
-               return acpi_ec_intr_wait(ec, event);
+       return inb(ec->data_addr);
 }
 
-static int acpi_ec_poll_wait(union acpi_ec *ec, u8 event)
+static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
-       u32 acpi_ec_status = 0;
-       u32 i = ACPI_EC_UDELAY_COUNT;
+       outb(command, ec->command_addr);
+}
 
-       if (!ec)
-               return -EINVAL;
+static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
+{
+       outb(data, ec->data_addr);
+}
 
-       /* Poll the EC status register waiting for the event to occur. */
+static int acpi_ec_check_status(u8 status, u8 event)
+{
        switch (event) {
-       case ACPI_EC_EVENT_OBF:
-               do {
-                       acpi_hw_low_level_read(8, &acpi_ec_status,
-                                              &ec->common.status_addr);
-                       if (acpi_ec_status & ACPI_EC_FLAG_OBF)
-                               return 0;
-                       udelay(ACPI_EC_UDELAY);
-               } while (--i > 0);
+       case ACPI_EC_EVENT_OBF_1:
+               if (status & ACPI_EC_FLAG_OBF)
+                       return 1;
                break;
-       case ACPI_EC_EVENT_IBE:
-               do {
-                       acpi_hw_low_level_read(8, &acpi_ec_status,
-                                              &ec->common.status_addr);
-                       if (!(acpi_ec_status & ACPI_EC_FLAG_IBF))
-                               return 0;
-                       udelay(ACPI_EC_UDELAY);
-               } while (--i > 0);
+       case ACPI_EC_EVENT_IBF_0:
+               if (!(status & ACPI_EC_FLAG_IBF))
+                       return 1;
                break;
        default:
-               return -EINVAL;
+               break;
        }
 
-       return -ETIME;
+       return 0;
 }
-static int acpi_ec_intr_wait(union acpi_ec *ec, unsigned int event)
-{
-       int result = 0;
-
 
-       ec->intr.expect_event = event;
-       smp_mb();
+static int acpi_ec_wait(struct acpi_ec *ec, u8 event)
+{
+       int i = (acpi_ec_mode == EC_POLL) ? ACPI_EC_UDELAY_COUNT : 0;
+       long time_left;
 
-       switch (event) {
-       case ACPI_EC_EVENT_IBE:
-               if (~acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) {
-                       ec->intr.expect_event = 0;
-                       return 0;
-               }
-               break;
-       default:
-               break;
+       ec->expect_event = event;
+       if (acpi_ec_check_status(acpi_ec_read_status(ec), event)) {
+               ec->expect_event = 0;
+               return 0;
        }
 
-       result = wait_event_timeout(ec->intr.wait,
-                                   !ec->intr.expect_event,
+       do {
+               if (acpi_ec_mode == EC_POLL) {
+                       udelay(ACPI_EC_UDELAY);
+               } else {
+                       time_left = wait_event_timeout(ec->wait,
+                                   !ec->expect_event,
                                    msecs_to_jiffies(ACPI_EC_DELAY));
-
-       ec->intr.expect_event = 0;
-       smp_mb();
-
-       /*
-        * Verify that the event in question has actually happened by
-        * querying EC status. Do the check even if operation timed-out
-        * to make sure that we did not miss interrupt.
-        */
-       switch (event) {
-       case ACPI_EC_EVENT_OBF:
-               if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF)
+                       if (time_left > 0) {
+                               ec->expect_event = 0;
+                               return 0;
+                       }
+               }
+               if (acpi_ec_check_status(acpi_ec_read_status(ec), event)) {
+                       ec->expect_event = 0;
                        return 0;
-               break;
+               }
+       } while (--i > 0);
 
-       case ACPI_EC_EVENT_IBE:
-               if (~acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
-                       return 0;
-               break;
-       }
+       ec->expect_event = 0;
 
        return -ETIME;
 }
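
Aside: the EC rewrite folds the separate OBF/IBF polling loops into one acpi_ec_check_status() predicate; acpi_ec_wait() then either spins in ACPI_EC_UDELAY steps (EC_POLL) or sleeps on the wait queue until the GPE handler wakes it (EC_INTR), re-checking the status register either way so a missed interrupt cannot wedge a transaction. The predicate in isolation as a runnable test (condensed to boolean expressions; flag values copied from the hunk above):

    #include <stdio.h>

    #define ACPI_EC_FLAG_OBF 0x01   /* output buffer full */
    #define ACPI_EC_FLAG_IBF 0x02   /* input buffer full */

    enum { ACPI_EC_EVENT_OBF_1 = 1, ACPI_EC_EVENT_IBF_0 };

    static int check_status(unsigned char status, int event)
    {
            switch (event) {
            case ACPI_EC_EVENT_OBF_1:
                    return !!(status & ACPI_EC_FLAG_OBF);
            case ACPI_EC_EVENT_IBF_0:
                    return !(status & ACPI_EC_FLAG_IBF);
            default:
                    return 0;
            }
    }

    int main(void)
    {
            /* OBF set: a data byte is ready to read -> 1 */
            printf("%d\n", check_status(ACPI_EC_FLAG_OBF, ACPI_EC_EVENT_OBF_1));
            /* IBF still set: EC has not consumed our last byte -> 0 */
            printf("%d\n", check_status(ACPI_EC_FLAG_IBF, ACPI_EC_EVENT_IBF_0));
            return 0;
    }
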
@@ -254,272 +191,150 @@ static int acpi_ec_intr_wait(union acpi_ec *ec, unsigned int event)
  * Note: samsung nv5000 doesn't work with ec burst mode.
  * http://bugzilla.kernel.org/show_bug.cgi?id=4980
  */
-int acpi_ec_enter_burst_mode(union acpi_ec *ec)
+int acpi_ec_enter_burst_mode(struct acpi_ec *ec)
 {
-       u32 tmp = 0;
-       int status = 0;
+       u8 tmp = 0;
+       u8 status = 0;
 
 
        status = acpi_ec_read_status(ec);
        if (status != -EINVAL && !(status & ACPI_EC_FLAG_BURST)) {
-               status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
+               status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
                if (status)
                        goto end;
-               acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE,
-                                       &ec->common.command_addr);
-               status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
-               acpi_hw_low_level_read(8, &tmp, &ec->common.data_addr);
+               acpi_ec_write_cmd(ec, ACPI_EC_BURST_ENABLE);
+               status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1);
+               tmp = acpi_ec_read_data(ec);
                if (tmp != 0x90) {      /* Burst ACK byte */
                        return -EINVAL;
                }
        }
 
-       atomic_set(&ec->intr.leaving_burst, 0);
+       atomic_set(&ec->leaving_burst, 0);
        return 0;
-      end:
-       ACPI_EXCEPTION ((AE_INFO, status, "EC wait, burst mode");
+  end:
+       ACPI_EXCEPTION((AE_INFO, status, "EC wait, burst mode"));
        return -1;
 }
 
-int acpi_ec_leave_burst_mode(union acpi_ec *ec)
+int acpi_ec_leave_burst_mode(struct acpi_ec *ec)
 {
-       int status = 0;
+       u8 status = 0;
 
 
        status = acpi_ec_read_status(ec);
        if (status != -EINVAL && (status & ACPI_EC_FLAG_BURST)){
-               status = acpi_ec_wait(ec, ACPI_EC_FLAG_IBF);
+               status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
                if(status)
                        goto end;
-               acpi_hw_low_level_write(8, ACPI_EC_BURST_DISABLE, &ec->common.command_addr);
-               acpi_ec_wait(ec, ACPI_EC_FLAG_IBF);
-       } 
-       atomic_set(&ec->intr.leaving_burst, 1);
+               acpi_ec_write_cmd(ec, ACPI_EC_BURST_DISABLE);
+               acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+       }
+       atomic_set(&ec->leaving_burst, 1);
        return 0;
-end:
-       ACPI_EXCEPTION((AE_INFO, status, "EC leave burst mode");
+  end:
+       ACPI_EXCEPTION((AE_INFO, status, "EC leave burst mode"));
        return -1;
 }
 #endif /* ACPI_FUTURE_USAGE */
 
-static int acpi_ec_read(union acpi_ec *ec, u8 address, u32 * data)
-{
-       if (acpi_ec_poll_mode)
-               return acpi_ec_poll_read(ec, address, data);
-       else
-               return acpi_ec_intr_read(ec, address, data);
-}
-static int acpi_ec_write(union acpi_ec *ec, u8 address, u8 data)
-{
-       if (acpi_ec_poll_mode)
-               return acpi_ec_poll_write(ec, address, data);
-       else
-               return acpi_ec_intr_write(ec, address, data);
-}
-static int acpi_ec_poll_read(union acpi_ec *ec, u8 address, u32 * data)
+static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
+                                       const u8 *wdata, unsigned wdata_len,
+                                       u8 *rdata, unsigned rdata_len)
 {
-       acpi_status status = AE_OK;
-       int result = 0;
-       u32 glk = 0;
+       int result;
 
+       acpi_ec_write_cmd(ec, command);
 
-       if (!ec || !data)
-               return -EINVAL;
-
-       *data = 0;
-
-       if (ec->common.global_lock) {
-               status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
-               if (ACPI_FAILURE(status))
-                       return -ENODEV;
+       for (; wdata_len > 0; wdata_len --) {
+               result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+               if (result)
+                       return result;
+               acpi_ec_write_data(ec, *(wdata++));
        }
 
-       if (down_interruptible(&ec->poll.sem)) {
-               result = -ERESTARTSYS;
-               goto end_nosem;
+       if (command == ACPI_EC_COMMAND_WRITE) {
+               result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+               if (result)
+                       return result;
        }
-       
-       acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ,
-                               &ec->common.command_addr);
-       result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (result)
-               goto end;
-
-       acpi_hw_low_level_write(8, address, &ec->common.data_addr);
-       result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
-       if (result)
-               goto end;
-
-       acpi_hw_low_level_read(8, data, &ec->common.data_addr);
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n",
-                         *data, address));
-
-      end:
-       up(&ec->poll.sem);
-end_nosem:
-       if (ec->common.global_lock)
-               acpi_release_global_lock(glk);
-
-       return result;
-}
-
-static int acpi_ec_poll_write(union acpi_ec *ec, u8 address, u8 data)
-{
-       int result = 0;
-       acpi_status status = AE_OK;
-       u32 glk = 0;
 
+       for (; rdata_len > 0; rdata_len --) {
+               result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1);
+               if (result)
+                       return result;
 
-       if (!ec)
-               return -EINVAL;
-
-       if (ec->common.global_lock) {
-               status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
-               if (ACPI_FAILURE(status))
-                       return -ENODEV;
-       }
-
-       if (down_interruptible(&ec->poll.sem)) {
-               result = -ERESTARTSYS;
-               goto end_nosem;
+               *(rdata++) = acpi_ec_read_data(ec);
        }
-       
-       acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE,
-                               &ec->common.command_addr);
-       result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (result)
-               goto end;
-
-       acpi_hw_low_level_write(8, address, &ec->common.data_addr);
-       result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (result)
-               goto end;
-
-       acpi_hw_low_level_write(8, data, &ec->common.data_addr);
-       result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (result)
-               goto end;
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Wrote [%02x] to address [%02x]\n",
-                         data, address));
-
-      end:
-       up(&ec->poll.sem);
-end_nosem:
-       if (ec->common.global_lock)
-               acpi_release_global_lock(glk);
-
-       return result;
+       return 0;
 }
 
-static int acpi_ec_intr_read(union acpi_ec *ec, u8 address, u32 * data)
+static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
+                               const u8 *wdata, unsigned wdata_len,
+                               u8 *rdata, unsigned rdata_len)
 {
-       int status = 0;
+       int status;
        u32 glk;
 
-
-       if (!ec || !data)
+       if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata))
                return -EINVAL;
 
-       *data = 0;
+        if (rdata)
+                memset(rdata, 0, rdata_len);
 
-       if (ec->common.global_lock) {
+       if (ec->global_lock) {
                status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
                if (ACPI_FAILURE(status))
                        return -ENODEV;
        }
+       down(&ec->sem);
 
-       WARN_ON(in_interrupt());
-       down(&ec->intr.sem);
-
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
+       status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
        if (status) {
                printk(KERN_DEBUG PREFIX "read EC, IB not empty\n");
                goto end;
        }
-       acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ,
-                               &ec->common.command_addr);
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (status) {
-               printk(KERN_DEBUG PREFIX "read EC, IB not empty\n");
-       }
 
-       acpi_hw_low_level_write(8, address, &ec->common.data_addr);
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
-       if (status) {
-               printk(KERN_DEBUG PREFIX "read EC, OB not full\n");
-               goto end;
-       }
-       acpi_hw_low_level_read(8, data, &ec->common.data_addr);
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n",
-                         *data, address));
+        status = acpi_ec_transaction_unlocked(ec, command,
+                                              wdata, wdata_len,
+                                              rdata, rdata_len);
 
-      end:
-       up(&ec->intr.sem);
+end:
+       up(&ec->sem);
 
-       if (ec->common.global_lock)
+       if (ec->global_lock)
                acpi_release_global_lock(glk);
 
        return status;
 }
 
-static int acpi_ec_intr_write(union acpi_ec *ec, u8 address, u8 data)
+static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
 {
-       int status = 0;
-       u32 glk;
-
-
-       if (!ec)
-               return -EINVAL;
-
-       if (ec->common.global_lock) {
-               status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
-               if (ACPI_FAILURE(status))
-                       return -ENODEV;
-       }
-
-       WARN_ON(in_interrupt());
-       down(&ec->intr.sem);
-
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (status) {
-               printk(KERN_DEBUG PREFIX "write EC, IB not empty\n");
-       }
-       acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE,
-                               &ec->common.command_addr);
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (status) {
-               printk(KERN_DEBUG PREFIX "write EC, IB not empty\n");
-       }
-
-       acpi_hw_low_level_write(8, address, &ec->common.data_addr);
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (status) {
-               printk(KERN_DEBUG PREFIX "write EC, IB not empty\n");
-       }
-
-       acpi_hw_low_level_write(8, data, &ec->common.data_addr);
+       int result;
+       u8 d;
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Wrote [%02x] to address [%02x]\n",
-                         data, address));
-
-       up(&ec->intr.sem);
-
-       if (ec->common.global_lock)
-               acpi_release_global_lock(glk);
+       result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
+                                    &address, 1, &d, 1);
+       *data = d;
+       return result;
+}
 
-       return status;
+static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
+{
+        u8 wdata[2] = { address, data };
+        return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
+                                  wdata, 2, NULL, 0);
 }
 
 /*
  * Externally callable EC access functions. For now, assume 1 EC only
  */
-int ec_read(u8 addr, u8 * val)
+int ec_read(u8 addr, u8 *val)
 {
-       union acpi_ec *ec;
+       struct acpi_ec *ec;
        int err;
-       u32 temp_data;
+       u8 temp_data;
 
        if (!first_ec)
                return -ENODEV;
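
Aside: acpi_ec_transaction_unlocked() now expresses the whole EC handshake in one place: write the command byte, then for each outgoing byte wait for IBF to clear before writing, and for each incoming byte wait for OBF to be set before reading; a register read becomes (0x80, one write, one read) and a register write (0x81, two writes). A stand-alone simulation with invented stubs in place of port I/O and acpi_ec_wait() (it also omits the extra post-write wait the real code does for ACPI_EC_COMMAND_WRITE):

    #include <stdio.h>

    /* invented stubs standing in for inb()/outb() and acpi_ec_wait() */
    static void write_cmd(unsigned char c)  { printf("cmd  <- 0x%02x\n", c); }
    static void write_data(unsigned char d) { printf("data <- 0x%02x\n", d); }
    static unsigned char read_data(void)    { return 0x5a; }
    static int wait_ibf0(void) { return 0; }   /* input buffer drained */
    static int wait_obf1(void) { return 0; }   /* output byte ready */

    static int transaction(unsigned char cmd,
                           const unsigned char *w, unsigned wlen,
                           unsigned char *r, unsigned rlen)
    {
            write_cmd(cmd);
            for (; wlen > 0; wlen--, w++) {
                    if (wait_ibf0())
                            return -1;          /* timeout */
                    write_data(*w);
            }
            for (; rlen > 0; rlen--, r++) {
                    if (wait_obf1())
                            return -1;
                    *r = read_data();
            }
            return 0;
    }

    int main(void)
    {
            unsigned char addr = 0x12, val;

            transaction(0x80, &addr, 1, &val, 1);   /* ACPI_EC_COMMAND_READ */
            printf("read 0x%02x\n", val);
            return 0;
    }
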
@@ -539,7 +354,7 @@ EXPORT_SYMBOL(ec_read);
 
 int ec_write(u8 addr, u8 val)
 {
-       union acpi_ec *ec;
+       struct acpi_ec *ec;
        int err;
 
        if (!first_ec)
@@ -554,255 +369,106 @@ int ec_write(u8 addr, u8 val)
 
 EXPORT_SYMBOL(ec_write);
 
-static int acpi_ec_query(union acpi_ec *ec, u32 * data)
-{
-       if (acpi_ec_poll_mode)
-               return acpi_ec_poll_query(ec, data);
-       else
-               return acpi_ec_intr_query(ec, data);
-}
-static int acpi_ec_poll_query(union acpi_ec *ec, u32 * data)
+extern int ec_transaction(u8 command,
+                          const u8 *wdata, unsigned wdata_len,
+                          u8 *rdata, unsigned rdata_len)
 {
-       int result = 0;
-       acpi_status status = AE_OK;
-       u32 glk = 0;
-
-
-       if (!ec || !data)
-               return -EINVAL;
-
-       *data = 0;
-
-       if (ec->common.global_lock) {
-               status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
-               if (ACPI_FAILURE(status))
-                       return -ENODEV;
-       }
+       struct acpi_ec *ec;
 
-       /*
-        * Query the EC to find out which _Qxx method we need to evaluate.
-        * Note that successful completion of the query causes the ACPI_EC_SCI
-        * bit to be cleared (and thus clearing the interrupt source).
-        */
-       if (down_interruptible(&ec->poll.sem)) {
-               result = -ERESTARTSYS;
-               goto end_nosem;
-       }
-       
-       acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY,
-                               &ec->common.command_addr);
-       result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
-       if (result)
-               goto end;
-
-       acpi_hw_low_level_read(8, data, &ec->common.data_addr);
-       if (!*data)
-               result = -ENODATA;
+       if (!first_ec)
+               return -ENODEV;
 
-      end:
-       up(&ec->poll.sem);
-end_nosem:
-       if (ec->common.global_lock)
-               acpi_release_global_lock(glk);
+       ec = acpi_driver_data(first_ec);
 
-       return result;
+       return acpi_ec_transaction(ec, command, wdata,
+                                  wdata_len, rdata, rdata_len);
 }
-static int acpi_ec_intr_query(union acpi_ec *ec, u32 * data)
-{
-       int status = 0;
-       u32 glk;
-
 
-       if (!ec || !data)
-               return -EINVAL;
-       *data = 0;
-
-       if (ec->common.global_lock) {
-               status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
-               if (ACPI_FAILURE(status))
-                       return -ENODEV;
-       }
+EXPORT_SYMBOL(ec_transaction);
 
-       down(&ec->intr.sem);
+static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
+{
+       int result;
+        u8 d;
 
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE);
-       if (status) {
-               printk(KERN_DEBUG PREFIX "query EC, IB not empty\n");
-               goto end;
-       }
-       /*
-        * Query the EC to find out which _Qxx method we need to evaluate.
-        * Note that successful completion of the query causes the ACPI_EC_SCI
-        * bit to be cleared (and thus clearing the interrupt source).
-        */
-       acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY,
-                               &ec->common.command_addr);
-       status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
-       if (status) {
-               printk(KERN_DEBUG PREFIX "query EC, OB not full\n");
-               goto end;
-       }
+        if (!ec || !data)
+                return -EINVAL;
 
-       acpi_hw_low_level_read(8, data, &ec->common.data_addr);
-       if (!*data)
-               status = -ENODATA;
+        /*
+         * Query the EC to find out which _Qxx method we need to evaluate.
+         * Note that successful completion of the query causes the ACPI_EC_SCI
+         * bit to be cleared (and thus clearing the interrupt source).
+         */
 
-      end:
-       up(&ec->intr.sem);
+        result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1);
+        if (result)
+                return result;
 
-       if (ec->common.global_lock)
-               acpi_release_global_lock(glk);
+        if (!d)
+                return -ENODATA;
 
-       return status;
+        *data = d;
+        return 0;
 }
 
 /* --------------------------------------------------------------------------
                                 Event Management
    -------------------------------------------------------------------------- */
 
-union acpi_ec_query_data {
+struct acpi_ec_query_data {
        acpi_handle handle;
        u8 data;
 };
 
 static void acpi_ec_gpe_query(void *ec_cxt)
 {
-       if (acpi_ec_poll_mode)
-               acpi_ec_gpe_poll_query(ec_cxt);
-       else
-               acpi_ec_gpe_intr_query(ec_cxt);
-}
-
-static void acpi_ec_gpe_poll_query(void *ec_cxt)
-{
-       union acpi_ec *ec = (union acpi_ec *)ec_cxt;
-       u32 value = 0;
-       static char object_name[5] = { '_', 'Q', '0', '0', '\0' };
-       const char hex[] = { '0', '1', '2', '3', '4', '5', '6', '7',
-               '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
-       };
-
+       struct acpi_ec *ec = (struct acpi_ec *)ec_cxt;
+       u8 value = 0;
+       static char object_name[8];
 
-       if (!ec_cxt)
+       if (!ec)
                goto end;
 
-       if (down_interruptible (&ec->poll.sem)) {
-               return;
-       }
-       acpi_hw_low_level_read(8, &value, &ec->common.command_addr);
-       up(&ec->poll.sem);
-
-       /* TBD: Implement asynch events!
-        * NOTE: All we care about are EC-SCI's.  Other EC events are
-        * handled via polling (yuck!).  This is because some systems
-        * treat EC-SCIs as level (versus EDGE!) triggered, preventing
-        *  a purely interrupt-driven approach (grumble, grumble).
-        */
+       value = acpi_ec_read_status(ec);
+
        if (!(value & ACPI_EC_FLAG_SCI))
                goto end;
 
        if (acpi_ec_query(ec, &value))
                goto end;
 
-       object_name[2] = hex[((value >> 4) & 0x0F)];
-       object_name[3] = hex[(value & 0x0F)];
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name));
+       snprintf(object_name, 8, "_Q%2.2X", value);
 
-       acpi_evaluate_object(ec->common.handle, object_name, NULL, NULL);
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s", object_name));
 
-      end:
-       acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR);
-}
-static void acpi_ec_gpe_intr_query(void *ec_cxt)
-{
-       union acpi_ec *ec = (union acpi_ec *)ec_cxt;
-       u32 value;
-       int result = -ENODATA;
-       static char object_name[5] = { '_', 'Q', '0', '0', '\0' };
-       const char hex[] = { '0', '1', '2', '3', '4', '5', '6', '7',
-               '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
-       };
+       acpi_evaluate_object(ec->handle, object_name, NULL, NULL);
 
-
-       if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_SCI)
-               result = acpi_ec_query(ec, &value);
-
-       if (result)
-               goto end;
-
-       object_name[2] = hex[((value >> 4) & 0x0F)];
-       object_name[3] = hex[(value & 0x0F)];
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name));
-
-       acpi_evaluate_object(ec->common.handle, object_name, NULL, NULL);
       end:
-       atomic_dec(&ec->intr.pending_gpe);
-       return;
+       acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR);
 }
 
 static u32 acpi_ec_gpe_handler(void *data)
-{
-       if (acpi_ec_poll_mode)
-               return acpi_ec_gpe_poll_handler(data);
-       else
-               return acpi_ec_gpe_intr_handler(data);
-}
-static u32 acpi_ec_gpe_poll_handler(void *data)
 {
        acpi_status status = AE_OK;
-       union acpi_ec *ec = (union acpi_ec *)data;
-
-       if (!ec)
-               return ACPI_INTERRUPT_NOT_HANDLED;
-
-       acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR);
-
-       status = acpi_os_execute(OSL_EC_POLL_HANDLER, acpi_ec_gpe_query, ec);
-
-       if (status == AE_OK)
-               return ACPI_INTERRUPT_HANDLED;
-       else
-               return ACPI_INTERRUPT_NOT_HANDLED;
-}
-static u32 acpi_ec_gpe_intr_handler(void *data)
-{
-       acpi_status status = AE_OK;
-       u32 value;
-       union acpi_ec *ec = (union acpi_ec *)data;
-
-       if (!ec)
-               return ACPI_INTERRUPT_NOT_HANDLED;
+       u8 value;
+       struct acpi_ec *ec = (struct acpi_ec *)data;
 
-       acpi_clear_gpe(NULL, ec->common.gpe_bit, ACPI_ISR);
+       acpi_clear_gpe(NULL, ec->gpe_bit, ACPI_ISR);
        value = acpi_ec_read_status(ec);
 
-       switch (ec->intr.expect_event) {
-       case ACPI_EC_EVENT_OBF:
-               if (!(value & ACPI_EC_FLAG_OBF))
-                       break;
-               ec->intr.expect_event = 0;
-               wake_up(&ec->intr.wait);
-               break;
-       case ACPI_EC_EVENT_IBE:
-               if ((value & ACPI_EC_FLAG_IBF))
-                       break;
-               ec->intr.expect_event = 0;
-               wake_up(&ec->intr.wait);
-               break;
-       default:
-               break;
+       if (acpi_ec_mode == EC_INTR) {
+               if (acpi_ec_check_status(value, ec->expect_event)) {
+                       ec->expect_event = 0;
+                       wake_up(&ec->wait);
+               }
        }
 
        if (value & ACPI_EC_FLAG_SCI) {
-               atomic_add(1, &ec->intr.pending_gpe);
-               status = acpi_os_execute(OSL_EC_BURST_HANDLER,
-                                                    acpi_ec_gpe_query, ec);
+               status = acpi_os_execute(OSL_EC_BURST_HANDLER, acpi_ec_gpe_query, ec);
                return status == AE_OK ?
                    ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
        }
-       acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR);
+       acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_ISR);
        return status == AE_OK ?
            ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
 }
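
Aside: the query path now builds the _Qxx handler name with snprintf() instead of a hand-rolled hex-digit table; "%2.2X" always yields exactly two uppercase hex digits, so a query byte of 0x3f maps to the ACPI method _Q3F. A runnable check of just that formatting:

    #include <stdio.h>

    int main(void)
    {
            char object_name[8];
            unsigned char value = 0x3f;     /* query byte returned by the EC */

            snprintf(object_name, sizeof(object_name), "_Q%2.2X", value);
            printf("%s\n", object_name);    /* prints _Q3F */
            return 0;
    }
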
@@ -833,7 +499,7 @@ acpi_ec_space_handler(u32 function,
                      void *handler_context, void *region_context)
 {
        int result = 0;
-       union acpi_ec *ec = NULL;
+       struct acpi_ec *ec = NULL;
        u64 temp = *value;
        acpi_integer f_v = 0;
        int i = 0;
@@ -843,18 +509,16 @@ acpi_ec_space_handler(u32 function,
                return AE_BAD_PARAMETER;
 
        if (bit_width != 8 && acpi_strict) {
-               printk(KERN_WARNING PREFIX
-                      "acpi_ec_space_handler: bit_width should be 8\n");
                return AE_BAD_PARAMETER;
        }
 
-       ec = (union acpi_ec *)handler_context;
+       ec = (struct acpi_ec *)handler_context;
 
       next_byte:
        switch (function) {
        case ACPI_READ:
                temp = 0;
-               result = acpi_ec_read(ec, (u8) address, (u32 *) & temp);
+               result = acpi_ec_read(ec, (u8) address, (u8 *) &temp);
                break;
        case ACPI_WRITE:
                result = acpi_ec_write(ec, (u8) address, (u8) temp);
@@ -905,20 +569,20 @@ static struct proc_dir_entry *acpi_ec_dir;
 
 static int acpi_ec_read_info(struct seq_file *seq, void *offset)
 {
-       union acpi_ec *ec = (union acpi_ec *)seq->private;
+       struct acpi_ec *ec = (struct acpi_ec *)seq->private;
 
 
        if (!ec)
                goto end;
 
        seq_printf(seq, "gpe bit:                 0x%02x\n",
-                  (u32) ec->common.gpe_bit);
+                  (u32) ec->gpe_bit);
        seq_printf(seq, "ports:                   0x%02x, 0x%02x\n",
-                  (u32) ec->common.status_addr.address,
-                  (u32) ec->common.data_addr.address);
+                  (u32) ec->command_addr,
+                  (u32) ec->data_addr);
        seq_printf(seq, "use global lock:         %s\n",
-                  ec->common.global_lock ? "yes" : "no");
-       acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR);
+                  ec->global_lock ? "yes" : "no");
+       acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR);
 
       end:
        return 0;
@@ -929,7 +593,7 @@ static int acpi_ec_info_open_fs(struct inode *inode, struct file *file)
        return single_open(file, acpi_ec_read_info, PDE(inode)->data);
 }
 
-static const struct file_operations acpi_ec_info_ops = {
+static struct file_operations acpi_ec_info_ops = {
        .open = acpi_ec_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -978,101 +642,35 @@ static int acpi_ec_remove_fs(struct acpi_device *device)
                                Driver Interface
    -------------------------------------------------------------------------- */
 
-static int acpi_ec_poll_add(struct acpi_device *device)
+static int acpi_ec_add(struct acpi_device *device)
 {
        int result = 0;
        acpi_status status = AE_OK;
-       union acpi_ec *ec = NULL;
+       struct acpi_ec *ec = NULL;
 
 
        if (!device)
                return -EINVAL;
 
-       ec = kmalloc(sizeof(union acpi_ec), GFP_KERNEL);
+       ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
        if (!ec)
                return -ENOMEM;
-       memset(ec, 0, sizeof(union acpi_ec));
-
-       ec->common.handle = device->handle;
-       ec->common.uid = -1;
-       init_MUTEX(&ec->poll.sem);
-       strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
-       strcpy(acpi_device_class(device), ACPI_EC_CLASS);
-       acpi_driver_data(device) = ec;
-
-       /* Use the global lock for all EC transactions? */
-       acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
-                             &ec->common.global_lock);
-
-       /* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
-          http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
-       if (ec_ecdt) {
-               acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
-                                                 ACPI_ADR_SPACE_EC,
-                                                 &acpi_ec_space_handler);
-
-               acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit,
-                                       &acpi_ec_gpe_handler);
-
-               kfree(ec_ecdt);
+       memset(ec, 0, sizeof(struct acpi_ec));
+
+       ec->handle = device->handle;
+       ec->uid = -1;
+       init_MUTEX(&ec->sem);
+       if (acpi_ec_mode == EC_INTR) {
+               atomic_set(&ec->leaving_burst, 1);
+               init_waitqueue_head(&ec->wait);
        }
-
-       /* Get GPE bit assignment (EC events). */
-       /* TODO: Add support for _GPE returning a package */
-       status =
-           acpi_evaluate_integer(ec->common.handle, "_GPE", NULL,
-                                 &ec->common.gpe_bit);
-       if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status, "Obtaining GPE bit"));
-               result = -ENODEV;
-               goto end;
-       }
-
-       result = acpi_ec_add_fs(device);
-       if (result)
-               goto end;
-
-       printk(KERN_INFO PREFIX "%s [%s] (gpe %d) polling mode.\n",
-              acpi_device_name(device), acpi_device_bid(device),
-              (u32) ec->common.gpe_bit);
-
-       if (!first_ec)
-               first_ec = device;
-
-      end:
-       if (result)
-               kfree(ec);
-
-       return result;
-}
-static int acpi_ec_intr_add(struct acpi_device *device)
-{
-       int result = 0;
-       acpi_status status = AE_OK;
-       union acpi_ec *ec = NULL;
-
-
-       if (!device)
-               return -EINVAL;
-
-       ec = kmalloc(sizeof(union acpi_ec), GFP_KERNEL);
-       if (!ec)
-               return -ENOMEM;
-       memset(ec, 0, sizeof(union acpi_ec));
-
-       ec->common.handle = device->handle;
-       ec->common.uid = -1;
-       atomic_set(&ec->intr.pending_gpe, 0);
-       atomic_set(&ec->intr.leaving_burst, 1);
-       init_MUTEX(&ec->intr.sem);
-       init_waitqueue_head(&ec->intr.wait);
        strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_EC_CLASS);
        acpi_driver_data(device) = ec;
 
        /* Use the global lock for all EC transactions? */
-       acpi_evaluate_integer(ec->common.handle, "_GLK", NULL,
-                             &ec->common.global_lock);
+       acpi_evaluate_integer(ec->handle, "_GLK", NULL,
+                             &ec->global_lock);
 
        /* XXX we don't test uids, because on some boxes ecdt uid = 0, see:
           http://bugzilla.kernel.org/show_bug.cgi?id=6111 */
@@ -1081,7 +679,7 @@ static int acpi_ec_intr_add(struct acpi_device *device)
                                                  ACPI_ADR_SPACE_EC,
                                                  &acpi_ec_space_handler);
 
-               acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit,
+               acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit,
                                        &acpi_ec_gpe_handler);
 
                kfree(ec_ecdt);
@@ -1090,10 +688,10 @@ static int acpi_ec_intr_add(struct acpi_device *device)
        /* Get GPE bit assignment (EC events). */
        /* TODO: Add support for _GPE returning a package */
        status =
-           acpi_evaluate_integer(ec->common.handle, "_GPE", NULL,
-                                 &ec->common.gpe_bit);
+           acpi_evaluate_integer(ec->handle, "_GPE", NULL,
+                                 &ec->gpe_bit);
        if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX "Obtaining GPE bit assignment\n");
+               ACPI_EXCEPTION((AE_INFO, status, "Obtaining GPE bit assignment"));
                result = -ENODEV;
                goto end;
        }
@@ -1102,14 +700,14 @@ static int acpi_ec_intr_add(struct acpi_device *device)
        if (result)
                goto end;
 
-       printk(KERN_INFO PREFIX "%s [%s] (gpe %d) interrupt mode.\n",
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s [%s] (gpe %d) interrupt mode.",
               acpi_device_name(device), acpi_device_bid(device),
-              (u32) ec->common.gpe_bit);
+              (u32) ec->gpe_bit));
 
        if (!first_ec)
                first_ec = device;
 
-      end:
+  end:
        if (result)
                kfree(ec);
 
@@ -1118,7 +716,7 @@ static int acpi_ec_intr_add(struct acpi_device *device)
 
 static int acpi_ec_remove(struct acpi_device *device, int type)
 {
-       union acpi_ec *ec = NULL;
+       struct acpi_ec *ec = NULL;
 
 
        if (!device)
@@ -1136,8 +734,7 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
 static acpi_status
 acpi_ec_io_ports(struct acpi_resource *resource, void *context)
 {
-       union acpi_ec *ec = (union acpi_ec *)context;
-       struct acpi_generic_address *addr;
+       struct acpi_ec *ec = (struct acpi_ec *)context;
 
        if (resource->type != ACPI_RESOURCE_TYPE_IO) {
                return AE_OK;
@@ -1148,26 +745,21 @@ acpi_ec_io_ports(struct acpi_resource *resource, void *context)
         * the second address region returned is the status/command
         * port.
         */
-       if (ec->common.data_addr.register_bit_width == 0) {
-               addr = &ec->common.data_addr;
-       } else if (ec->common.command_addr.register_bit_width == 0) {
-               addr = &ec->common.command_addr;
+       if (ec->data_addr == 0) {
+               ec->data_addr = resource->data.io.minimum;
+       } else if (ec->command_addr == 0) {
+               ec->command_addr = resource->data.io.minimum;
        } else {
                return AE_CTRL_TERMINATE;
        }
 
-       addr->address_space_id = ACPI_ADR_SPACE_SYSTEM_IO;
-       addr->register_bit_width = 8;
-       addr->register_bit_offset = 0;
-       addr->address = resource->data.io.minimum;
-
        return AE_OK;
 }
 
 static int acpi_ec_start(struct acpi_device *device)
 {
        acpi_status status = AE_OK;
-       union acpi_ec *ec = NULL;
+       struct acpi_ec *ec = NULL;
 
 
        if (!device)
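
Aside: with the GAS plumbing gone, acpi_ec_io_ports() just records raw port numbers: the first IO resource in the EC's _CRS is the data port and the second the status/command port, and the walk terminates once both slots are filled. A stand-alone miniature of that two-slot fill (0x62/0x66 are the conventional EC ports, used here only as sample input):

    #include <stdio.h>

    struct ec { unsigned long data_addr, command_addr; };

    /* fill data port first, then command/status port, then stop walking */
    static int take_port(struct ec *ec, unsigned long port)
    {
            if (ec->data_addr == 0)
                    ec->data_addr = port;
            else if (ec->command_addr == 0)
                    ec->command_addr = port;
            else
                    return 1;               /* AE_CTRL_TERMINATE */
            return 0;
    }

    int main(void)
    {
            struct ec ec = { 0, 0 };

            take_port(&ec, 0x62);
            take_port(&ec, 0x66);
            printf("data=0x%lx cmd=0x%lx\n", ec.data_addr, ec.command_addr);
            return 0;
    }
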
@@ -1181,39 +773,35 @@ static int acpi_ec_start(struct acpi_device *device)
        /*
         * Get I/O port addresses. Convert to GAS format.
         */
-       status = acpi_walk_resources(ec->common.handle, METHOD_NAME__CRS,
+       status = acpi_walk_resources(ec->handle, METHOD_NAME__CRS,
                                     acpi_ec_io_ports, ec);
-       if (ACPI_FAILURE(status)
-           || ec->common.command_addr.register_bit_width == 0) {
-               printk(KERN_ERR PREFIX "Error getting I/O port addresses\n");
+       if (ACPI_FAILURE(status) || ec->command_addr == 0) {
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "Error getting I/O port addresses"));
                return -ENODEV;
        }
 
-       ec->common.status_addr = ec->common.command_addr;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02x, ports=0x%2x,0x%2x\n",
-                         (u32) ec->common.gpe_bit,
-                         (u32) ec->common.command_addr.address,
-                         (u32) ec->common.data_addr.address));
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02lx, ports=0x%2lx,0x%2lx",
+                         ec->gpe_bit, ec->command_addr, ec->data_addr));
 
        /*
         * Install GPE handler
         */
-       status = acpi_install_gpe_handler(NULL, ec->common.gpe_bit,
+       status = acpi_install_gpe_handler(NULL, ec->gpe_bit,
                                          ACPI_GPE_EDGE_TRIGGERED,
                                          &acpi_ec_gpe_handler, ec);
        if (ACPI_FAILURE(status)) {
                return -ENODEV;
        }
-       acpi_set_gpe_type(NULL, ec->common.gpe_bit, ACPI_GPE_TYPE_RUNTIME);
-       acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR);
+       acpi_set_gpe_type(NULL, ec->gpe_bit, ACPI_GPE_TYPE_RUNTIME);
+       acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR);
 
-       status = acpi_install_address_space_handler(ec->common.handle,
+       status = acpi_install_address_space_handler(ec->handle,
                                                    ACPI_ADR_SPACE_EC,
                                                    &acpi_ec_space_handler,
                                                    &acpi_ec_space_setup, ec);
        if (ACPI_FAILURE(status)) {
-               acpi_remove_gpe_handler(NULL, ec->common.gpe_bit,
+               acpi_remove_gpe_handler(NULL, ec->gpe_bit,
                                        &acpi_ec_gpe_handler);
                return -ENODEV;
        }
@@ -1224,7 +812,7 @@ static int acpi_ec_start(struct acpi_device *device)
 static int acpi_ec_stop(struct acpi_device *device, int type)
 {
        acpi_status status = AE_OK;
-       union acpi_ec *ec = NULL;
+       struct acpi_ec *ec = NULL;
 
 
        if (!device)
@@ -1232,14 +820,14 @@ static int acpi_ec_stop(struct acpi_device *device, int type)
 
        ec = acpi_driver_data(device);
 
-       status = acpi_remove_address_space_handler(ec->common.handle,
+       status = acpi_remove_address_space_handler(ec->handle,
                                                   ACPI_ADR_SPACE_EC,
                                                   &acpi_ec_space_handler);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
        status =
-           acpi_remove_gpe_handler(NULL, ec->common.gpe_bit,
+           acpi_remove_gpe_handler(NULL, ec->gpe_bit,
                                    &acpi_ec_gpe_handler);
        if (ACPI_FAILURE(status))
                return -ENODEV;
@@ -1251,76 +839,30 @@ static acpi_status __init
 acpi_fake_ecdt_callback(acpi_handle handle,
                        u32 Level, void *context, void **retval)
 {
-
-       if (acpi_ec_poll_mode)
-               return acpi_fake_ecdt_poll_callback(handle,
-                                                      Level, context, retval);
-       else
-               return acpi_fake_ecdt_intr_callback(handle,
-                                                    Level, context, retval);
-}
-
-static acpi_status __init
-acpi_fake_ecdt_poll_callback(acpi_handle handle,
-                               u32 Level, void *context, void **retval)
-{
-       acpi_status status;
-
-       status = acpi_walk_resources(handle, METHOD_NAME__CRS,
-                                    acpi_ec_io_ports, ec_ecdt);
-       if (ACPI_FAILURE(status))
-               return status;
-       ec_ecdt->common.status_addr = ec_ecdt->common.command_addr;
-
-       ec_ecdt->common.uid = -1;
-       acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->common.uid);
-
-       status =
-           acpi_evaluate_integer(handle, "_GPE", NULL,
-                                 &ec_ecdt->common.gpe_bit);
-       if (ACPI_FAILURE(status))
-               return status;
-       init_MUTEX(&ec_ecdt->poll.sem);
-       ec_ecdt->common.global_lock = TRUE;
-       ec_ecdt->common.handle = handle;
-
-       printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n",
-              (u32) ec_ecdt->common.gpe_bit,
-              (u32) ec_ecdt->common.command_addr.address,
-              (u32) ec_ecdt->common.data_addr.address);
-
-       return AE_CTRL_TERMINATE;
-}
-
-static acpi_status __init
-acpi_fake_ecdt_intr_callback(acpi_handle handle,
-                             u32 Level, void *context, void **retval)
-{
        acpi_status status;
 
-       init_MUTEX(&ec_ecdt->intr.sem);
-       init_waitqueue_head(&ec_ecdt->intr.wait);
+       init_MUTEX(&ec_ecdt->sem);
+       if (acpi_ec_mode == EC_INTR) {
+               init_waitqueue_head(&ec_ecdt->wait);
+       }
        status = acpi_walk_resources(handle, METHOD_NAME__CRS,
                                     acpi_ec_io_ports, ec_ecdt);
        if (ACPI_FAILURE(status))
                return status;
-       ec_ecdt->common.status_addr = ec_ecdt->common.command_addr;
 
-       ec_ecdt->common.uid = -1;
-       acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->common.uid);
+       ec_ecdt->uid = -1;
+       acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->uid);
 
        status =
            acpi_evaluate_integer(handle, "_GPE", NULL,
-                                 &ec_ecdt->common.gpe_bit);
+                                 &ec_ecdt->gpe_bit);
        if (ACPI_FAILURE(status))
                return status;
-       ec_ecdt->common.global_lock = TRUE;
-       ec_ecdt->common.handle = handle;
+       ec_ecdt->global_lock = TRUE;
+       ec_ecdt->handle = handle;
 
-       printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n",
-              (u32) ec_ecdt->common.gpe_bit,
-              (u32) ec_ecdt->common.command_addr.address,
-              (u32) ec_ecdt->common.data_addr.address);
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "GPE=0x%02lx, ports=0x%2lx, 0x%2lx",
+              ec_ecdt->gpe_bit, ec_ecdt->command_addr, ec_ecdt->data_addr));
 
        return AE_CTRL_TERMINATE;
 }
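
The hunks above belong to a larger consolidation: the EC driver's duplicated poll/intr code paths collapse into a single struct acpi_ec selected by one mode flag, so the fake-ECDT callback no longer needs per-mode variants. A minimal sketch of the resulting shape, assuming kernel headers; sem, wait, and gpe_bit are taken from the diff, everything else is illustrative:

    #include <linux/wait.h>
    #include <asm/semaphore.h>              /* 2006-era init_MUTEX() */

    enum ec_mode { EC_POLL, EC_INTR };
    static enum ec_mode acpi_ec_mode = EC_INTR;

    struct ec_sketch {
            unsigned long gpe_bit;
            struct semaphore sem;           /* needed in both modes */
            wait_queue_head_t wait;         /* touched only in EC_INTR mode */
    };

    static void ec_init_sync(struct ec_sketch *ec)
    {
            init_MUTEX(&ec->sem);
            if (acpi_ec_mode == EC_INTR)
                    init_waitqueue_head(&ec->wait);
    }
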
@@ -1340,14 +882,14 @@ static int __init acpi_ec_fake_ecdt(void)
        acpi_status status;
        int ret = 0;
 
-       printk(KERN_INFO PREFIX "Try to make an fake ECDT\n");
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Try to make a fake ECDT"));
 
-       ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL);
+       ec_ecdt = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
        if (!ec_ecdt) {
                ret = -ENOMEM;
                goto error;
        }
-       memset(ec_ecdt, 0, sizeof(union acpi_ec));
+       memset(ec_ecdt, 0, sizeof(struct acpi_ec));
 
        status = acpi_get_devices(ACPI_EC_HID,
                                  acpi_fake_ecdt_callback, NULL, NULL);
@@ -1355,23 +897,15 @@ static int __init acpi_ec_fake_ecdt(void)
                kfree(ec_ecdt);
                ec_ecdt = NULL;
                ret = -ENODEV;
+               ACPI_EXCEPTION((AE_INFO, status, "Can't make a fake ECDT"));
                goto error;
        }
        return 0;
-      error:
-       printk(KERN_ERR PREFIX "Can't make an fake ECDT\n");
+  error:
        return ret;
 }
 
 static int __init acpi_ec_get_real_ecdt(void)
-{
-       if (acpi_ec_poll_mode)
-               return acpi_ec_poll_get_real_ecdt();
-       else
-               return acpi_ec_intr_get_real_ecdt();
-}
-
-static int __init acpi_ec_poll_get_real_ecdt(void)
 {
        acpi_status status;
        struct acpi_table_ecdt *ecdt_ptr;
@@ -1382,80 +916,36 @@ static int __init acpi_ec_poll_get_real_ecdt(void)
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       printk(KERN_INFO PREFIX "Found ECDT\n");
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found ECDT"));
 
        /*
         * Generate a temporary ec context to use until the namespace is scanned
         */
-       ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL);
+       ec_ecdt = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
        if (!ec_ecdt)
                return -ENOMEM;
-       memset(ec_ecdt, 0, sizeof(union acpi_ec));
-
-       ec_ecdt->common.command_addr = ecdt_ptr->ec_control;
-       ec_ecdt->common.status_addr = ecdt_ptr->ec_control;
-       ec_ecdt->common.data_addr = ecdt_ptr->ec_data;
-       ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit;
-       init_MUTEX(&ec_ecdt->poll.sem);
-       /* use the GL just to be safe */
-       ec_ecdt->common.global_lock = TRUE;
-       ec_ecdt->common.uid = ecdt_ptr->uid;
+       memset(ec_ecdt, 0, sizeof(struct acpi_ec));
 
-       status =
-           acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->common.handle);
-       if (ACPI_FAILURE(status)) {
-               goto error;
+       init_MUTEX(&ec_ecdt->sem);
+       if (acpi_ec_mode == EC_INTR) {
+               init_waitqueue_head(&ec_ecdt->wait);
        }
-
-       return 0;
-      error:
-       printk(KERN_ERR PREFIX "Could not use ECDT\n");
-       kfree(ec_ecdt);
-       ec_ecdt = NULL;
-
-       return -ENODEV;
-}
-
-static int __init acpi_ec_intr_get_real_ecdt(void)
-{
-       acpi_status status;
-       struct acpi_table_ecdt *ecdt_ptr;
-
-       status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING,
-                                        (struct acpi_table_header **)
-                                        &ecdt_ptr);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       printk(KERN_INFO PREFIX "Found ECDT\n");
-
-       /*
-        * Generate a temporary ec context to use until the namespace is scanned
-        */
-       ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL);
-       if (!ec_ecdt)
-               return -ENOMEM;
-       memset(ec_ecdt, 0, sizeof(union acpi_ec));
-
-       init_MUTEX(&ec_ecdt->intr.sem);
-       init_waitqueue_head(&ec_ecdt->intr.wait);
-       ec_ecdt->common.command_addr = ecdt_ptr->ec_control;
-       ec_ecdt->common.status_addr = ecdt_ptr->ec_control;
-       ec_ecdt->common.data_addr = ecdt_ptr->ec_data;
-       ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit;
+       ec_ecdt->command_addr = ecdt_ptr->ec_control.address;
+       ec_ecdt->data_addr = ecdt_ptr->ec_data.address;
+       ec_ecdt->gpe_bit = ecdt_ptr->gpe_bit;
        /* use the GL just to be safe */
-       ec_ecdt->common.global_lock = TRUE;
-       ec_ecdt->common.uid = ecdt_ptr->uid;
+       ec_ecdt->global_lock = TRUE;
+       ec_ecdt->uid = ecdt_ptr->uid;
 
        status =
-           acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->common.handle);
+           acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle);
        if (ACPI_FAILURE(status)) {
                goto error;
        }
 
        return 0;
-      error:
-       printk(KERN_ERR PREFIX "Could not use ECDT\n");
+  error:
+       ACPI_EXCEPTION((AE_INFO, status, "Could not use ECDT"));
        kfree(ec_ecdt);
        ec_ecdt = NULL;
 
@@ -1480,14 +970,14 @@ int __init acpi_ec_ecdt_probe(void)
        /*
         * Install GPE handler
         */
-       status = acpi_install_gpe_handler(NULL, ec_ecdt->common.gpe_bit,
+       status = acpi_install_gpe_handler(NULL, ec_ecdt->gpe_bit,
                                          ACPI_GPE_EDGE_TRIGGERED,
                                          &acpi_ec_gpe_handler, ec_ecdt);
        if (ACPI_FAILURE(status)) {
                goto error;
        }
-       acpi_set_gpe_type(NULL, ec_ecdt->common.gpe_bit, ACPI_GPE_TYPE_RUNTIME);
-       acpi_enable_gpe(NULL, ec_ecdt->common.gpe_bit, ACPI_NOT_ISR);
+       acpi_set_gpe_type(NULL, ec_ecdt->gpe_bit, ACPI_GPE_TYPE_RUNTIME);
+       acpi_enable_gpe(NULL, ec_ecdt->gpe_bit, ACPI_NOT_ISR);
 
        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_EC,
@@ -1495,7 +985,7 @@ int __init acpi_ec_ecdt_probe(void)
                                                    &acpi_ec_space_setup,
                                                    ec_ecdt);
        if (ACPI_FAILURE(status)) {
-               acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit,
+               acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit,
                                        &acpi_ec_gpe_handler);
                goto error;
        }
@@ -1503,7 +993,7 @@ int __init acpi_ec_ecdt_probe(void)
        return 0;
 
       error:
-       printk(KERN_ERR PREFIX "Could not use ECDT\n");
+       ACPI_EXCEPTION((AE_INFO, status, "Could not use ECDT"));
        kfree(ec_ecdt);
        ec_ecdt = NULL;
 
@@ -1562,13 +1052,13 @@ static int __init acpi_ec_set_intr_mode(char *str)
                return 0;
 
        if (intr) {
-               acpi_ec_poll_mode = EC_INTR;
-               acpi_ec_driver.ops.add = acpi_ec_intr_add;
+               acpi_ec_mode = EC_INTR;
        } else {
-               acpi_ec_poll_mode = EC_POLL;
-               acpi_ec_driver.ops.add = acpi_ec_poll_add;
+               acpi_ec_mode = EC_POLL;
        }
-       printk(KERN_INFO PREFIX "EC %s mode.\n", intr ? "interrupt" : "polling");
+       acpi_ec_driver.ops.add = acpi_ec_add;
+       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "EC %s mode.\n", intr ? "interrupt" : "polling"));
+
        return 1;
 }
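
With the mode folded into one flag, the boot-parameter handler above no longer swaps .add callbacks per mode; it just records the choice. A hedged sketch of the __setup() plumbing such a handler typically hangs off (the exact parameter string is an assumption, not shown in this excerpt):

    #include <linux/kernel.h>
    #include <linux/init.h>

    static int ec_intr_sketch = 1;          /* default: interrupt mode */

    static int __init ec_intr_setup(char *str)
    {
            int intr;

            if (!get_option(&str, &intr))
                    return 0;               /* malformed, not consumed */
            ec_intr_sketch = !!intr;
            return 1;                       /* consumed "ec_intr=<n>" */
    }
    __setup("ec_intr=", ec_intr_setup);
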
 
index 6eef4efddcf62b0f83fba9f2014b61403e4564fe..ee2a10bf907745ceef823a5d9a2ecd637ae51b2c 100644 (file)
@@ -342,20 +342,8 @@ static u32 acpi_ev_global_lock_handler(void *context)
        if (acquired) {
 
                /* Got the lock, now wake all threads waiting for it */
-
                acpi_gbl_global_lock_acquired = TRUE;
-
-               /* Run the Global Lock thread which will signal all waiting threads */
-
-               status =
-                   acpi_os_execute(OSL_GLOBAL_LOCK_HANDLER,
-                                   acpi_ev_global_lock_thread, context);
-               if (ACPI_FAILURE(status)) {
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Could not queue Global Lock thread"));
-
-                       return (ACPI_INTERRUPT_NOT_HANDLED);
-               }
+               acpi_ev_global_lock_thread(context);
        }
 
        return (ACPI_INTERRUPT_HANDLED);
index 5b3c7a85eb9a609e7e1fcbc8910f15e95e5f1f26..203d1359190af2c8d52f694f506eda23d75de4bd 100644 (file)
@@ -225,13 +225,12 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
                                if (!
                                    (ACPI_STRNCMP
                                     (object_hID.value, PCI_ROOT_HID_STRING,
-                                     sizeof(PCI_ROOT_HID_STRING))
-                                    ||
-                                    !(ACPI_STRNCMP
-                                      (object_hID.value,
-                                       PCI_EXPRESS_ROOT_HID_STRING,
-                                       sizeof(PCI_EXPRESS_ROOT_HID_STRING)))))
-                               {
+                                     sizeof(PCI_ROOT_HID_STRING)))
+                                   ||
+                                   !(ACPI_STRNCMP
+                                     (object_hID.value,
+                                      PCI_EXPRESS_ROOT_HID_STRING,
+                                      sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
 
                                        /* Install a handler for this PCI root bridge */
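
This hunk changes only parenthesization: in the old form the `||` and the second ACPI_STRNCMP test sat inside the first negation, so a PCI Express root bridge HID could never match on its own. A compilable reduction with plain strncmp(); the PNP0A03/PNP0A08 strings are the usual PCI root HIDs, used here for illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *hid = "PNP0A08";    /* a PCI Express root bridge */

            /* fixed shape: !a || !b, either HID matches */
            int fixed = !strncmp(hid, "PNP0A03", 7) ||
                        !strncmp(hid, "PNP0A08", 7);

            /* buggy shape: !(a || !b), second test swallowed by the first ! */
            int buggy = !(strncmp(hid, "PNP0A03", 7) ||
                          !strncmp(hid, "PNP0A08", 7));

            printf("fixed=%d buggy=%d\n", fixed, buggy);    /* fixed=1 buggy=0 */
            return 0;
    }
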
 
index 15fc12482ba0ab723b8121f13b9562e128e9cc32..003a9876c9683bee7de662e5959f06ec42ed91f9 100644 (file)
@@ -1702,13 +1702,11 @@ static struct ibm_struct ibms[] = {
         .name = "brightness",
         .read = brightness_read,
         .write = brightness_write,
-        .experimental = 1,
         },
        {
         .name = "volume",
         .read = volume_read,
         .write = volume_write,
-        .experimental = 1,
         },
        {
         .name = "fan",
index ec6b7f9ede34d911818075d75eb11f7d95f02b87..2e17ec75af03e84756689ce8bca915f2b0e3c2d5 100644 (file)
@@ -48,6 +48,12 @@ ACPI_MODULE_NAME("acpi_motherboard")
  * the io ports if they really know they can use it, while
  * still preventing hotplug PCI devices from using it.
  */
+
+/*
+ * When CONFIG_PNP is enabled, pnp/system.c binds to PNP0C01
+ * and PNP0C02, redundant with acpi_reserve_io_ranges().
+ * But acpi_reserve_io_ranges() is necessary for !CONFIG_PNP.
+ */
 static acpi_status acpi_reserve_io_ranges(struct acpi_resource *res, void *data)
 {
        struct resource *requested_res = NULL;
index 068fe4f100b0f8e609cd73576f53930c66bdbb4c..c84286cbbe2571e3c5b4fb62bf60b93942ff941a 100644 (file)
@@ -73,6 +73,7 @@ static unsigned int acpi_irq_irq;
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
+static struct workqueue_struct *kacpi_notify_wq;
 
 acpi_status acpi_os_initialize(void)
 {
@@ -91,8 +92,9 @@ acpi_status acpi_os_initialize1(void)
                return AE_NULL_ENTRY;
        }
        kacpid_wq = create_singlethread_workqueue("kacpid");
+       kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
        BUG_ON(!kacpid_wq);
-
+       BUG_ON(!kacpi_notify_wq);
        return AE_OK;
 }
 
@@ -104,6 +106,7 @@ acpi_status acpi_os_terminate(void)
        }
 
        destroy_workqueue(kacpid_wq);
+       destroy_workqueue(kacpi_notify_wq);
 
        return AE_OK;
 }
@@ -566,10 +569,7 @@ void acpi_os_derive_pci_id(acpi_handle rhandle,    /* upper bound  */
 
 static void acpi_os_execute_deferred(void *context)
 {
-       struct acpi_os_dpc *dpc = NULL;
-
-
-       dpc = (struct acpi_os_dpc *)context;
+       struct acpi_os_dpc *dpc = (struct acpi_os_dpc *)context;
        if (!dpc) {
                printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
                return;
@@ -604,14 +604,12 @@ acpi_status acpi_os_execute(acpi_execute_type type,
        struct acpi_os_dpc *dpc;
        struct work_struct *task;
 
-       ACPI_FUNCTION_TRACE("os_queue_for_execution");
-
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));
 
        if (!function)
-               return_ACPI_STATUS(AE_BAD_PARAMETER);
+               return AE_BAD_PARAMETER;
 
        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
@@ -624,26 +622,20 @@ acpi_status acpi_os_execute(acpi_execute_type type,
         * from the same memory.
         */
 
-       dpc =
-           kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-                   GFP_ATOMIC);
+       dpc = kmalloc(sizeof(struct acpi_os_dpc) +
+                       sizeof(struct work_struct), GFP_ATOMIC);
        if (!dpc)
-               return_ACPI_STATUS(AE_NO_MEMORY);
-
+               return AE_NO_MEMORY;
        dpc->function = function;
        dpc->context = context;
-
        task = (void *)(dpc + 1);
        INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-       if (!queue_work(kacpid_wq, task)) {
-               ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-                                 "Call to queue_work() failed.\n"));
-               kfree(dpc);
+       if (!queue_work((type == OSL_NOTIFY_HANDLER)?
+                       kacpi_notify_wq : kacpid_wq, task)) {
                status = AE_ERROR;
+               kfree(dpc);
        }
-
-       return_ACPI_STATUS(status);
+       return status;
 }
 
 EXPORT_SYMBOL(acpi_os_execute);
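
The osl.c changes split deferred execution across two single-threaded workqueues so that notify handlers, which can run long, no longer stall ordinary deferred work behind them. A reduced sketch of the dispatch, using the same 2006-era workqueue API as the diff; setup and teardown elided:

    #include <linux/workqueue.h>

    static struct workqueue_struct *wq_main;        /* "kacpid" */
    static struct workqueue_struct *wq_notify;      /* "kacpi_notify" */

    static int dispatch(int is_notify, struct work_struct *task)
    {
            /* route by caller type; queue_work() returns 0 if already queued */
            return queue_work(is_notify ? wq_notify : wq_main, task);
    }
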
index 7f3e7e77e79436e9984c52a4d6c80345f3f3265d..d53bd9878ca2ccddcee6cde058d8781c74b698ba 100644 (file)
@@ -307,7 +307,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
        if (!link || !irq)
                return -EINVAL;
 
-       resource = kmalloc(sizeof(*resource) + 1, GFP_ATOMIC);
+       resource = kmalloc(sizeof(*resource) + 1, irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
        if (!resource)
                return -ENOMEM;
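
The pci_link one-liner picks the allocation class from context instead of always paying the GFP_ATOMIC penalty. The idiom, as a hedged kernel-style sketch (irqs_disabled() comes from the arch headers):

    #include <linux/slab.h>

    static void *alloc_for_context(size_t n)
    {
            /* atomic only when sleeping is illegal; otherwise allow reclaim */
            return kmalloc(n, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
    }
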
 
index fec225d1b6b74520f6b132c2bcefd5dcb600b484..fe67a8af520ecdb715e020511b028c3b6d7f865c 100644 (file)
@@ -216,10 +216,8 @@ static int acpi_power_off_device(acpi_handle handle)
 {
        int result = 0;
        acpi_status status = AE_OK;
-       struct acpi_device *device = NULL;
        struct acpi_power_resource *resource = NULL;
 
-
        result = acpi_power_get_context(handle, &resource);
        if (result)
                return result;
@@ -230,13 +228,13 @@ static int acpi_power_off_device(acpi_handle handle)
        if (resource->references) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Resource [%s] is still in use, dereferencing\n",
-                                 device->pnp.bus_id));
+                                 resource->device->pnp.bus_id));
                return 0;
        }
 
        if (resource->state == ACPI_POWER_RESOURCE_STATE_OFF) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] already off\n",
-                                 device->pnp.bus_id));
+                                 resource->device->pnp.bus_id));
                return 0;
        }
 
@@ -251,8 +249,7 @@ static int acpi_power_off_device(acpi_handle handle)
                return -ENOEXEC;
 
        /* Update the power resource's _device_ power state */
-       device = resource->device;
-       device->power.state = ACPI_STATE_D3;
+       resource->device->power.state = ACPI_STATE_D3;
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource [%s] turned off\n",
                          resource->name));
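
The acpi_power_off_device() hunks cure a use-before-assignment: `device` started as NULL and was assigned only near the end, yet the early debug prints dereferenced it; the fix reads through resource->device, valid as soon as `resource` is. The bug in miniature (compilable, all names illustrative):

    #include <stdio.h>

    struct dev { const char *bus_id; };
    struct res { int references; struct dev *device; };

    static int power_off(struct res *r)
    {
            struct dev *device = NULL;      /* old code: assigned only later */

            if (r->references) {
                    /* old code printed device->bus_id here: NULL dereference */
                    printf("still in use: %s\n", r->device->bus_id);
                    return 0;
            }
            device = r->device;
            printf("turned off: %s\n", device->bus_id);
            return 0;
    }

    int main(void)
    {
            struct dev d = { "PNP0C0A:00" };
            struct res r = { 1, &d };
            return power_off(&r);
    }
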
index b13d64415b7ab651fc42be5b13351427d9dad6a3..1908e0d202226a47b0f839439a84f599d91649b1 100644 (file)
@@ -519,7 +519,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
 
 static void *processor_device_array[NR_CPUS];
 
-static int acpi_processor_start(struct acpi_device *device)
+static int __cpuinit acpi_processor_start(struct acpi_device *device)
 {
        int result = 0;
        acpi_status status = AE_OK;
index 0a395fca843b46686b748ffe1583515cc863e9af..e67144cf3c8b91f865c9bdc2e64a441a875f6046 100644 (file)
@@ -219,6 +219,23 @@ static void acpi_safe_halt(void)
 
 static atomic_t c3_cpu_count;
 
+/* Common C-state entry for C2, C3, .. */
+static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
+{
+       if (cstate->space_id == ACPI_CSTATE_FFH) {
+               /* Call into architectural FFH based C-state */
+               acpi_processor_ffh_cstate_enter(cstate);
+       } else {
+               int unused;
+               /* IO port based C-state */
+               inb(cstate->address);
+               /* Dummy wait op - must do something useless after P_LVL2 read
+                  because chipsets cannot guarantee that STPCLK# signal
+                  gets asserted in time to freeze execution properly. */
+               unused = inl(acpi_fadt.xpm_tmr_blk.address);
+       }
+}
+
 static void acpi_processor_idle(void)
 {
        struct acpi_processor *pr = NULL;
@@ -361,11 +378,7 @@ static void acpi_processor_idle(void)
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C2 */
-               inb(cx->address);
-               /* Dummy wait op - must do something useless after P_LVL2 read
-                  because chipsets cannot guarantee that STPCLK# signal
-                  gets asserted in time to freeze execution properly. */
-               t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+               acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
 
@@ -401,9 +414,7 @@ static void acpi_processor_idle(void)
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C3 */
-               inb(cx->address);
-               /* Dummy wait op (see above) */
-               t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+               acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                if (pr->flags.bm_check) {
@@ -628,20 +639,16 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
        return 0;
 }
 
-static int acpi_processor_get_power_info_default_c1(struct acpi_processor *pr)
+static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
 {
-
-       /* Zero initialize all the C-states info. */
-       memset(pr->power.states, 0, sizeof(pr->power.states));
-
-       /* set the first C-State to C1 */
-       pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
-
-       /* the C0 state only exists as a filler in our array,
-        * and all processors need to support C1 */
+       if (!pr->power.states[ACPI_STATE_C1].valid) {
+               /* set the first C-State to C1 */
+               /* all processors need to support C1 */
+               pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
+               pr->power.states[ACPI_STATE_C1].valid = 1;
+       }
+       /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
-       pr->power.states[ACPI_STATE_C1].valid = 1;
-
        return 0;
 }
 
@@ -658,12 +665,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
        if (nocst)
                return -ENODEV;
 
-       current_count = 1;
-
-       /* Zero initialize C2 onwards and prepare for fresh CST lookup */
-       for (i = 2; i < ACPI_PROCESSOR_MAX_POWER; i++)
-               memset(&(pr->power.states[i]), 0, 
-                               sizeof(struct acpi_processor_cx));
+       current_count = 0;
 
        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
@@ -718,22 +720,39 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;
 
-               cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
-                   0 : reg->address;
-
                /* There should be an easy way to extract an integer... */
                obj = (union acpi_object *)&(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;
 
                cx.type = obj->integer.value;
-
-               if ((cx.type != ACPI_STATE_C1) &&
-                   (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
-                       continue;
-
-               if ((cx.type < ACPI_STATE_C2) || (cx.type > ACPI_STATE_C3))
-                       continue;
+               /*
+                * Some buggy BIOSes won't list C1 in _CST -
+                * Let acpi_processor_get_power_info_default() handle them later
+                */
+               if (i == 1 && cx.type != ACPI_STATE_C1)
+                       current_count++;
+
+               cx.address = reg->address;
+               cx.index = current_count + 1;
+
+               cx.space_id = ACPI_CSTATE_SYSTEMIO;
+               if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+                       if (acpi_processor_ffh_cstate_probe
+                                       (pr->id, &cx, reg) == 0) {
+                               cx.space_id = ACPI_CSTATE_FFH;
+                       } else if (cx.type != ACPI_STATE_C1) {
+                               /*
+                                * C1 is a special case where FIXED_HARDWARE
+                                * can be handled in non-MWAIT way as well.
+                                * In that case, save this _CST entry info.
+                                * That is, we retain space_id of SYSTEM_IO for
+                                * halt based C1.
+                                * Otherwise, ignore this info and continue.
+                                */
+                               continue;
+                       }
+               }
 
                obj = (union acpi_object *)&(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
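
The _CST rework stops rejecting FIXED_HARDWARE entries outright: it probes for an FFH (MWAIT-style) entry method first and keeps a halt-based SYSTEM_IO fallback only for C1. The decision table with the ACPI plumbing stripped to a compilable sketch; constants are illustrative:

    enum space { SPACE_SYSTEMIO, SPACE_FFH, SPACE_INVALID };

    static enum space classify_cstate(int type, int fixed_hw, int ffh_probe_ok)
    {
            if (!fixed_hw)
                    return SPACE_SYSTEMIO;  /* ordinary I/O-port C-state */
            if (ffh_probe_ok)
                    return SPACE_FFH;       /* native (MWAIT) entry */
            if (type == 1)                  /* ACPI_STATE_C1 */
                    return SPACE_SYSTEMIO;  /* halt-based C1 still works */
            return SPACE_INVALID;           /* unusable _CST entry, skip it */
    }
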
@@ -938,12 +957,18 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
        /* NOTE: the idle thread may not be running while calling
         * this function */
 
-       /* Adding C1 state */
-       acpi_processor_get_power_info_default_c1(pr);
+       /* Zero initialize all the C-states info. */
+       memset(pr->power.states, 0, sizeof(pr->power.states));
+
        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                acpi_processor_get_power_info_fadt(pr);
 
+       if (result)
+               return result;
+
+       acpi_processor_get_power_info_default(pr);
+
        pr->power.count = acpi_processor_power_verify(pr);
 
        /*
@@ -1083,6 +1108,7 @@ static const struct file_operations acpi_processor_power_fops = {
        .release = single_release,
 };
 
+#ifdef CONFIG_SMP
 static void smp_callback(void *v)
 {
        /* we already woke the CPU up, nothing more to do */
@@ -1104,8 +1130,9 @@ static int acpi_processor_latency_notify(struct notifier_block *b,
 static struct notifier_block acpi_processor_latency_notifier = {
        .notifier_call = acpi_processor_latency_notify,
 };
+#endif
 
-int acpi_processor_power_init(struct acpi_processor *pr,
+int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                              struct acpi_device *device)
 {
        acpi_status status = 0;
@@ -1121,7 +1148,9 @@ int acpi_processor_power_init(struct acpi_processor *pr,
                               "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
+#ifdef CONFIG_SMP
                register_latency_notifier(&acpi_processor_latency_notifier);
+#endif
        }
 
        if (!pr)
@@ -1193,7 +1222,9 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
                 * copies of pm_idle before proceeding.
                 */
                cpu_idle_wait();
+#ifdef CONFIG_SMP
                unregister_latency_notifier(&acpi_processor_latency_notifier);
+#endif
        }
 
        return 0;
index 62bef0b3b614aef8d10742cd3494e9ecca149675..8908a975e5753ca028179298e008502b774eb3f2 100644 (file)
@@ -98,11 +98,11 @@ static int update_info_mode = UPDATE_INFO_MODE;
 static int update_time = UPDATE_TIME;
 static int update_time2 = UPDATE_TIME2;
 
-module_param(capacity_mode, int, CAPACITY_UNIT);
-module_param(update_mode, int, UPDATE_MODE);
-module_param(update_info_mode, int, UPDATE_INFO_MODE);
-module_param(update_time, int, UPDATE_TIME);
-module_param(update_time2, int, UPDATE_TIME2);
+module_param(capacity_mode, int, 0);
+module_param(update_mode, int, 0);
+module_param(update_info_mode, int, 0);
+module_param(update_time, int, 0);
+module_param(update_time2, int, 0);
 
 static int acpi_sbs_add(struct acpi_device *device);
 static int acpi_sbs_remove(struct acpi_device *device, int type);
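
The sbs.c fix corrects a classic module_param() misuse: the third argument is the sysfs permission mask for /sys/module/.../parameters, not a default value, so passing UPDATE_TIME and friends there produced bogus file modes. Correct shape, as a kernel-style sketch:

    #include <linux/module.h>

    static int update_time = 60;            /* the default belongs here */
    module_param(update_time, int, 0444);   /* perm: 0 (hidden) or an octal mode */
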
@@ -1685,10 +1685,16 @@ static int acpi_sbs_add(struct acpi_device *device)
 
 int acpi_sbs_remove(struct acpi_device *device, int type)
 {
-       struct acpi_sbs *sbs = (struct acpi_sbs *)acpi_driver_data(device);
+       struct acpi_sbs *sbs = NULL;
        int id;
 
-       if (!device || !sbs) {
+       if (!device) {
+               return -EINVAL;
+       }
+
+       sbs = (struct acpi_sbs *)acpi_driver_data(device);
+
+       if (!sbs) {
                return -EINVAL;
        }
 
index 7856db759af0be7579abcd1733e74d1782455dbe..11e2d4454e053f6ece5f8d6aab31e5b6491e8f0a 100644 (file)
@@ -324,7 +324,7 @@ acpi_tb_get_this_table(struct acpi_pointer *address,
 
        if (header->length < sizeof(struct acpi_table_header)) {
                ACPI_ERROR((AE_INFO,
-                           "Table length (%X) is smaller than minimum (%X)",
+                           "Table length (%X) is smaller than minimum (%zX)",
                            header->length, sizeof(struct acpi_table_header)));
 
                return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
index 0ad3dbb9ebca377563a6a177ab092704f63545f8..86a5fca9b739de2b9d42344dc6c5406c55f17f73 100644 (file)
@@ -187,7 +187,7 @@ acpi_status acpi_tb_validate_rsdt(struct acpi_table_header *table_ptr)
 
        if (table_ptr->length < sizeof(struct acpi_table_header)) {
                ACPI_ERROR((AE_INFO,
-                           "RSDT/XSDT length (%X) is smaller than minimum (%X)",
+                           "RSDT/XSDT length (%X) is smaller than minimum (%zX)",
                            table_ptr->length,
                            sizeof(struct acpi_table_header)));
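
Both table-checking hunks apply the same printf rule: sizeof() yields a size_t, whose portable length modifier is 'z' (%zX, %zu); plain %X is wrong on 64-bit targets. Compilable demonstration:

    #include <stdio.h>

    struct header_sketch { char signature[4]; unsigned int length; };

    int main(void)
    {
            printf("minimum length: %zX\n", sizeof(struct header_sketch));
            return 0;
    }
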
 
index 77138a39eb041d0c2b6a87db88b424bdb51abfd6..83728a9457ad896590e79fa44d55d9e6efedaf8f 100644 (file)
@@ -870,7 +870,11 @@ static unsigned int ata_id_xfermask(const u16 *id)
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
-               pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+               u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
+               if (mode < 5)   /* Valid PIO range */
+                       pio_mask = (2 << mode) - 1;
+               else
+                       pio_mask = 1;
 
                /* But wait.. there's more. Design your standards by
                 * committee and you too can get a free iordy field to
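
This libata hunk bounds-checks a device-supplied field before shifting: an IDENTIFY page reporting a PIO timing number above 4 would otherwise expand into a mask far past the valid PIO0..PIO4 range. Compilable reduction:

    #include <stdio.h>

    static unsigned int pio_mask_from_id(unsigned char mode)
    {
            if (mode < 5)                           /* valid PIO0..PIO4 */
                    return (2u << mode) - 1;        /* e.g. mode 2 -> 0x7 */
            return 1;                               /* bogus field: PIO0 only */
    }

    int main(void)
    {
            printf("mode 2 -> %#x, mode 0xff -> %#x\n",
                   pio_mask_from_id(2), pio_mask_from_id(0xff));
            return 0;
    }
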
index b0d0cc41f3e8332041195a71e97c98405a04650c..7af2a4ba49905e068d7eb38c4ad0f778ccc3702e 100644 (file)
@@ -164,10 +164,10 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 {
        int rc = 0;
        u8 scsi_cmd[MAX_COMMAND_SIZE];
-       u8 args[4], *argbuf = NULL;
+       u8 args[4], *argbuf = NULL, *sensebuf = NULL;
        int argsize = 0;
-       struct scsi_sense_hdr sshdr;
        enum dma_data_direction data_dir;
+       int cmd_result;
 
        if (arg == NULL)
                return -EINVAL;
@@ -175,6 +175,10 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
        if (copy_from_user(args, arg, sizeof(args)))
                return -EFAULT;
 
+       sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+       if (!sensebuf)
+               return -ENOMEM;
+
        memset(scsi_cmd, 0, sizeof(scsi_cmd));
 
        if (args[3]) {
@@ -191,7 +195,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
                data_dir = DMA_FROM_DEVICE;
        } else {
                scsi_cmd[1]  = (3 << 1); /* Non-data */
-               /* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
+               scsi_cmd[2]  = 0x20;     /* cc but no off.line or data xfer */
                data_dir = DMA_NONE;
        }
 
@@ -210,18 +214,46 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 
        /* Good values for timeout and retries?  Values below
           from scsi_ioctl_send_command() for default case... */
-       if (scsi_execute_req(scsidev, scsi_cmd, data_dir, argbuf, argsize,
-                            &sshdr, (10*HZ), 5)) {
+       cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
+                                 sensebuf, (10*HZ), 5, 0);
+
+       if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+               u8 *desc = sensebuf + 8;
+               cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+
+               /* If we set cc then ATA pass-through will cause a
+                * check condition even if no error. Filter that. */
+               if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+                       struct scsi_sense_hdr sshdr;
+                       scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
+                                             &sshdr);
+                       if (sshdr.sense_key==0 &&
+                           sshdr.asc==0 && sshdr.ascq==0)
+                               cmd_result &= ~SAM_STAT_CHECK_CONDITION;
+               }
+
+               /* Send userspace a few ATA registers (same as drivers/ide) */
+               if (sensebuf[0] == 0x72 &&     /* format is "descriptor" */
+                   desc[0] == 0x09 ) {        /* code is "ATA Descriptor" */
+                       args[0] = desc[13];    /* status */
+                       args[1] = desc[3];     /* error */
+                       args[2] = desc[5];     /* sector count (0:7) */
+                       if (copy_to_user(arg, args, sizeof(args)))
+                               rc = -EFAULT;
+               }
+       }
+
+
+       if (cmd_result) {
                rc = -EIO;
                goto error;
        }
 
-       /* Need code to retrieve data from check condition? */
-
        if ((argbuf)
         && copy_to_user(arg + sizeof(args), argbuf, argsize))
                rc = -EFAULT;
 error:
+       kfree(sensebuf);
        kfree(argbuf);
        return rc;
 }
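
The ata_cmd_ioctl() rework sets the ck_cond bit (the 0x20 in CDB byte 2) so taskfile registers come back in the sense data, and must then filter the benign CHECK CONDITION a successful pass-through now raises. The filter reduced to plain logic, no SCSI headers required:

    /* a CHECK CONDITION with NO SENSE key and zero asc/ascq is not an error */
    static int is_real_error(int check_condition,
                             unsigned char key, unsigned char asc,
                             unsigned char ascq)
    {
            if (!check_condition)
                    return 0;
            return !(key == 0 && asc == 0 && ascq == 0);
    }
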
index 7977f471d5e9a0619674f209600121c86b52ce24..2c3cc0ccc6060fd19bdce5631bccb2e4b475005f 100644 (file)
@@ -141,7 +141,7 @@ static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned
                                memcpy(&pad, buf + buflen - slop, slop);
                                outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
                        } else {
-                               pad = cpu_to_le16(inl(ap->ioaddr.data_addr));
+                               pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
                                memcpy(buf + buflen - slop, &pad, slop);
                        }
                }
index 8bcdfa64667c683f5171cee00579d7fbcc91ab22..72eda5160fadea392178458f7d71a1c13f41d2a2 100644 (file)
@@ -260,6 +260,7 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
 #if 0
        { PCI_VDEVICE(PROMISE, 0x3570), board_20771 },
 #endif
+       { PCI_VDEVICE(PROMISE, 0x3577), board_20771 },
 
        { }     /* terminate list */
 };
index cec539e601fefd4f9317962f405a305a8312e65c..6148073532b2951ec9172f6ea0fc069fec58baa0 100644 (file)
@@ -4379,8 +4379,8 @@ static inline void DAC960_P_To_PD_TranslateEnquiry(void *Enquiry)
 static inline void DAC960_P_To_PD_TranslateDeviceState(void *DeviceState)
 {
   memcpy(DeviceState + 2, DeviceState + 3, 1);
-  memcpy(DeviceState + 4, DeviceState + 5, 2);
-  memcpy(DeviceState + 6, DeviceState + 8, 4);
+  memmove(DeviceState + 4, DeviceState + 5, 2);
+  memmove(DeviceState + 6, DeviceState + 8, 4);
 }
 
 static inline
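
The DAC960 change matters because these field shuffles copy within a single buffer: memcpy() is undefined for overlapping source and destination, while memmove() handles overlap by definition. Compilable demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[] = "0123456789";

            /* src bytes [5,6] overlap dst bytes [4,5]: memmove is required */
            memmove(buf + 4, buf + 5, 2);
            printf("%s\n", buf);
            return 0;
    }
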
index 5d254b71450915e05100c3e90a6690c77191bf9d..5d6562171533ff31f226756ae025f4fe7a678be7 100644 (file)
@@ -1709,10 +1709,13 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        return get_disk(unit[drive].gendisk);
 }
 
-int __init amiga_floppy_init(void)
+static int __init amiga_floppy_init(void)
 {
        int i, ret;
 
+       if (!MACH_IS_AMIGA)
+               return -ENXIO;
+
        if (!AMIGAHW_PRESENT(AMI_FLOPPY))
                return -ENXIO;
 
@@ -1809,15 +1812,9 @@ out_blkdev:
        return ret;
 }
 
+module_init(amiga_floppy_init);
 #ifdef MODULE
 
-int init_module(void)
-{
-       if (!MACH_IS_AMIGA)
-               return -ENXIO;
-       return amiga_floppy_init();
-}
-
 #if 0 /* not safe to unload */
 void cleanup_module(void)
 {
index a3f64bfe6b5870eb52229cdd9e9dd36fd6289aaf..485aa87e9bcd6069b7c17a72d2fe946a95100db5 100644 (file)
@@ -432,6 +432,12 @@ static int __init rd_init(void)
                rd_disks[i] = alloc_disk(1);
                if (!rd_disks[i])
                        goto out;
+
+               rd_queue[i] = blk_alloc_queue(GFP_KERNEL);
+               if (!rd_queue[i]) {
+                       put_disk(rd_disks[i]);
+                       goto out;
+               }
        }
 
        if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) {
@@ -442,10 +448,6 @@ static int __init rd_init(void)
        for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) {
                struct gendisk *disk = rd_disks[i];
 
-               rd_queue[i] = blk_alloc_queue(GFP_KERNEL);
-               if (!rd_queue[i])
-                       goto out_queue;
-
                blk_queue_make_request(rd_queue[i], &rd_make_request);
                blk_queue_hardsect_size(rd_queue[i], rd_blocksize);
 
@@ -466,8 +468,6 @@ static int __init rd_init(void)
                CONFIG_BLK_DEV_RAM_COUNT, rd_size, rd_blocksize);
 
        return 0;
-out_queue:
-       unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 out:
        while (i--) {
                put_disk(rd_disks[i]);
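
The rd.c reordering pairs every allocation with its rollback inside one loop: take the disk, then the queue; if the queue fails, release the disk from this iteration and fall into the common unwind that frees all earlier iterations. Skeleton of the idiom, with malloc()/free() standing in for alloc_disk()/blk_alloc_queue():

    #include <stdlib.h>

    #define NUNITS 16

    static int init_units(void *disk[], void *queue[])
    {
            int i;

            for (i = 0; i < NUNITS; i++) {
                    disk[i] = malloc(64);
                    if (!disk[i])
                            goto out;
                    queue[i] = malloc(64);
                    if (!queue[i]) {
                            free(disk[i]);          /* undo this iteration */
                            goto out;
                    }
            }
            return 0;
    out:
            while (i--) {                           /* undo earlier iterations */
                    free(queue[i]);
                    free(disk[i]);
            }
            return -1;
    }
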
index 10cc38783bdf3c860b0f9c45e06c1714b3c2b490..0d97b7eb818aa69dc39c06229bb484a39eef2978 100644 (file)
@@ -48,9 +48,9 @@
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
 #include <linux/delay.h>
+#include <linux/io.h>
 
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/dma.h>
 
index 82ddbdd7bd4ba18f0718e388f6db83b8131992a9..7cc2685ca84abb08c29a8ec426cb871e84566ad6 100644 (file)
@@ -329,7 +329,7 @@ static struct kobject *z2_find(dev_t dev, int *part, void *data)
 
 static struct request_queue *z2_queue;
 
-int __init 
+static int __init 
 z2_init(void)
 {
     int ret;
@@ -370,26 +370,7 @@ err:
     return ret;
 }
 
-#if defined(MODULE)
-
-MODULE_LICENSE("GPL");
-
-int
-init_module( void )
-{
-    int error;
-    
-    error = z2_init();
-    if ( error == 0 )
-    {
-       printk( KERN_INFO DEVICE_NAME ": loaded as module\n" );
-    }
-    
-    return error;
-}
-
-void
-cleanup_module( void )
+static void __exit z2_exit(void)
 {
     int i, j;
     blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
@@ -425,4 +406,7 @@ cleanup_module( void )
 
     return;
 } 
-#endif
+
+module_init(z2_init);
+module_exit(z2_exit);
+MODULE_LICENSE("GPL");
index 67cdda43f22990fbdd0e5a9a3b4727fb350876ba..516751754aa9e6d33b4b139f84bcb10edaecb239 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/timer.h>
 
 #include <linux/device.h>
 #include <linux/firmware.h>
@@ -43,7 +42,7 @@
 #define BT_DBG(D...)
 #endif
 
-#define VERSION "1.0"
+#define VERSION "1.1"
 
 static int ignore = 0;
 
@@ -72,7 +71,7 @@ struct bcm203x_data {
 
        unsigned long           state;
 
-       struct timer_list       timer;
+       struct work_struct      work;
 
        struct urb              *urb;
        unsigned char           *buffer;
@@ -105,7 +104,7 @@ static void bcm203x_complete(struct urb *urb)
 
                data->state = BCM203X_SELECT_MEMORY;
 
-               mod_timer(&data->timer, jiffies + (HZ / 10));
+               schedule_work(&data->work);
                break;
 
        case BCM203X_SELECT_MEMORY:
@@ -158,9 +157,9 @@ static void bcm203x_complete(struct urb *urb)
        }
 }
 
-static void bcm203x_timer(unsigned long user_data)
+static void bcm203x_work(void *user_data)
 {
-       struct bcm203x_data *data = (struct bcm203x_data *) user_data;
+       struct bcm203x_data *data = user_data;
 
        if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
                BT_ERR("Can't submit URB");
@@ -247,13 +246,11 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
 
        release_firmware(firmware);
 
-       init_timer(&data->timer);
-       data->timer.function = bcm203x_timer;
-       data->timer.data = (unsigned long) data;
+       INIT_WORK(&data->work, bcm203x_work, (void *) data);
 
        usb_set_intfdata(intf, data);
 
-       mod_timer(&data->timer, jiffies + HZ);
+       schedule_work(&data->work);
 
        return 0;
 }
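
The bcm203x conversion trades a timer for a work item: the follow-up URB submission is not time-critical, and a work handler runs in process context rather than the timer's interrupt context. Sketch of the replacement, using the same pre-2.6.20 workqueue API the driver uses, where INIT_WORK still took an explicit data pointer:

    #include <linux/workqueue.h>

    struct bulk_dev {
            struct work_struct work;
    };

    static void bulk_dev_work(void *arg)            /* old-style handler */
    {
            struct bulk_dev *d = arg;
            /* process context: may sleep, unlike a timer callback */
            (void)d;
    }

    static void bulk_dev_start(struct bulk_dev *d)
    {
            INIT_WORK(&d->work, bulk_dev_work, d);  /* (work, fn, data) */
            schedule_work(&d->work);                /* replaces mod_timer() */
    }
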
index 0e6f35fcc2ebf9ed0fd36ddaa74c5faf3b8cedac..39a9f8cc6412a0faa1ed80df4ff12fbf56693f91 100644 (file)
@@ -1046,7 +1046,7 @@ source "drivers/char/tpm/Kconfig"
 
 config TELCLOCK
        tristate "Telecom clock driver for MPBL0010 ATCA SBC"
-       depends on EXPERIMENTAL
+       depends on EXPERIMENTAL && X86
        default n
        help
          The telecom clock device is specific to the MPBL0010 ATCA computer and
index c3f95583a120ca1ca89ebd7d3d4259f024255df7..706733c0b36a7e7af833f97580fb978454dc0ed1 100644 (file)
@@ -1157,6 +1157,7 @@ static int __init pc_init(void)
        int crd;
        struct board_info *bd;
        unsigned char board_id = 0;
+       int err = -ENOMEM;
 
        int pci_boards_found, pci_count;
 
@@ -1164,13 +1165,11 @@ static int __init pc_init(void)
 
        pc_driver = alloc_tty_driver(MAX_ALLOC);
        if (!pc_driver)
-               return -ENOMEM;
+               goto out1;
 
        pc_info = alloc_tty_driver(MAX_ALLOC);
-       if (!pc_info) {
-               put_tty_driver(pc_driver);
-               return -ENOMEM;
-       }
+       if (!pc_info)
+               goto out2;
 
        /* -----------------------------------------------------------------------
                If epca_setup has not been ran by LILO set num_cards to defaults; copy
@@ -1370,11 +1369,17 @@ static int __init pc_init(void)
 
        } /* End for each card */
 
-       if (tty_register_driver(pc_driver))
-               panic("Couldn't register Digi PC/ driver");
+       err = tty_register_driver(pc_driver);
+       if (err) {
+               printk(KERN_ERR "Couldn't register Digi PC/ driver");
+               goto out3;
+       }
 
-       if (tty_register_driver(pc_info))
-               panic("Couldn't register Digi PC/ info ");
+       err = tty_register_driver(pc_info);
+       if (err) {
+               printk(KERN_ERR "Couldn't register Digi PC/ info ");
+               goto out4;
+       }
 
        /* -------------------------------------------------------------------
           Start up the poller to check for events on all enabled boards
@@ -1385,6 +1390,15 @@ static int __init pc_init(void)
        mod_timer(&epca_timer, jiffies + HZ/25);
        return 0;
 
+out4:
+       tty_unregister_driver(pc_driver);
+out3:
+       put_tty_driver(pc_info);
+out2:
+       put_tty_driver(pc_driver);
+out1:
+       return err;
+
 } /* End pc_init */
 
 /* ------------------ Begin post_fep_init  ---------------------- */
index fc944d375be75101429b98e2f160dd41f4e6100e..54d93f0345e8565ae2459ce236bd9a8be49cc8f3 100644 (file)
@@ -1007,7 +1007,7 @@ i2InputAvailable(i2ChanStrPtr pCh)
 // applications that one cannot break out of.
 //******************************************************************************
 static int
-i2Output(i2ChanStrPtr pCh, const char *pSource, int count, int user )
+i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
 {
        i2eBordStrPtr pB;
        unsigned char *pInsert;
@@ -1020,7 +1020,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count, int user )
 
        int bailout = 10;
 
-       ip2trace (CHANN, ITRC_OUTPUT, ITRC_ENTER, 2, count, user );
+       ip2trace (CHANN, ITRC_OUTPUT, ITRC_ENTER, 2, count, 0 );
 
        // Ensure channel structure seems real
        if ( !i2Validate ( pCh ) ) 
@@ -1087,12 +1087,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count, int user )
                        DATA_COUNT_OF(pInsert)  = amountToMove;
 
                        // Move the data
-                       if ( user ) {
-                               rc = copy_from_user((char*)(DATA_OF(pInsert)), pSource,
-                                               amountToMove );
-                       } else {
-                               memcpy( (char*)(DATA_OF(pInsert)), pSource, amountToMove );
-                       }
+                       memcpy( (char*)(DATA_OF(pInsert)), pSource, amountToMove );
                        // Adjust pointers and indices
                        pSource                                 += amountToMove;
                        pCh->Obuf_char_count    += amountToMove;
index 952e113ccd8a48b84c15f6af1b6b535e79f7ab28..e559e9bac06d11645c3832b0bd7464684b727508 100644 (file)
@@ -332,7 +332,7 @@ static int  i2QueueCommands(int, i2ChanStrPtr, int, int, cmdSyntaxPtr,...);
 static int  i2GetStatus(i2ChanStrPtr, int);
 static int  i2Input(i2ChanStrPtr);
 static int  i2InputFlush(i2ChanStrPtr);
-static int  i2Output(i2ChanStrPtr, const char *, int, int);
+static int  i2Output(i2ChanStrPtr, const char *, int);
 static int  i2OutputFree(i2ChanStrPtr);
 static int  i2ServiceBoard(i2eBordStrPtr);
 static void i2DrainOutput(i2ChanStrPtr, int);
index 858ba5432c990f01a7f93aa175bd054b57106084..a3f32d46d2f80300bb1dfc1a3e28a39655bd3b0e 100644 (file)
@@ -1704,7 +1704,7 @@ ip2_write( PTTY tty, const unsigned char *pData, int count)
 
        /* This is the actual move bit. Make sure it does what we need!!!!! */
        WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
-       bytesSent = i2Output( pCh, pData, count, 0 );
+       bytesSent = i2Output( pCh, pData, count);
        WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
 
        ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent );
@@ -1764,7 +1764,7 @@ ip2_flush_chars( PTTY tty )
                //
                // We may need to restart i2Output if it does not fullfill this request
                //
-               strip = i2Output( pCh, pCh->Pbuf, pCh->Pbuf_stuff, 0 );
+               strip = i2Output( pCh, pCh->Pbuf, pCh->Pbuf_stuff);
                if ( strip != pCh->Pbuf_stuff ) {
                        memmove( pCh->Pbuf, &pCh->Pbuf[strip], pCh->Pbuf_stuff - strip );
                }
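
The ip2 hunks delete i2Output()'s user-copy branch outright: the tty core had long since started handing drivers kernel-side buffers, so the flag was always 0 and the copy_from_user() path was dead weight. What remains is an unconditional kernel-to-kernel move; reduced sketch:

    #include <linux/string.h>

    /* src is always a kernel buffer now; no "user" flag to branch on */
    static void push_bytes(char *fifo, const char *src, int n)
    {
            memcpy(fifo, src, n);
    }
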
index 2455e8d478ace521bd7ef8f85c3ffac7e6a5388e..34a4fd13fa817ec0241a05157593872716b49600 100644 (file)
@@ -1928,13 +1928,8 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
                        (long long) bmc->guid[8]);
 }
 
-static void
-cleanup_bmc_device(struct kref *ref)
+static void remove_files(struct bmc_device *bmc)
 {
-       struct bmc_device *bmc;
-
-       bmc = container_of(ref, struct bmc_device, refcount);
-
        device_remove_file(&bmc->dev->dev,
                           &bmc->device_id_attr);
        device_remove_file(&bmc->dev->dev,
@@ -1951,12 +1946,23 @@ cleanup_bmc_device(struct kref *ref)
                           &bmc->manufacturer_id_attr);
        device_remove_file(&bmc->dev->dev,
                           &bmc->product_id_attr);
+
        if (bmc->id.aux_firmware_revision_set)
                device_remove_file(&bmc->dev->dev,
                                   &bmc->aux_firmware_rev_attr);
        if (bmc->guid_set)
                device_remove_file(&bmc->dev->dev,
                                   &bmc->guid_attr);
+}
+
+static void
+cleanup_bmc_device(struct kref *ref)
+{
+       struct bmc_device *bmc;
+
+       bmc = container_of(ref, struct bmc_device, refcount);
+
+       remove_files(bmc);
        platform_device_unregister(bmc->dev);
        kfree(bmc);
 }
@@ -1977,6 +1983,79 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
        mutex_unlock(&ipmidriver_mutex);
 }
 
+static int create_files(struct bmc_device *bmc)
+{
+       int err;
+
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->device_id_attr);
+       if (err) goto out;
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->provides_dev_sdrs_attr);
+       if (err) goto out_devid;
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->revision_attr);
+       if (err) goto out_sdrs;
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->firmware_rev_attr);
+       if (err) goto out_rev;
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->version_attr);
+       if (err) goto out_firm;
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->add_dev_support_attr);
+       if (err) goto out_version;
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->manufacturer_id_attr);
+       if (err) goto out_add_dev;
+       err = device_create_file(&bmc->dev->dev,
+                          &bmc->product_id_attr);
+       if (err) goto out_manu;
+       if (bmc->id.aux_firmware_revision_set) {
+               err = device_create_file(&bmc->dev->dev,
+                                  &bmc->aux_firmware_rev_attr);
+               if (err) goto out_prod_id;
+       }
+       if (bmc->guid_set) {
+               err = device_create_file(&bmc->dev->dev,
+                                  &bmc->guid_attr);
+               if (err) goto out_aux_firm;
+       }
+
+       return 0;
+
+out_aux_firm:
+       if (bmc->id.aux_firmware_revision_set)
+               device_remove_file(&bmc->dev->dev,
+                                  &bmc->aux_firmware_rev_attr);
+out_prod_id:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->product_id_attr);
+out_manu:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->manufacturer_id_attr);
+out_add_dev:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->add_dev_support_attr);
+out_version:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->version_attr);
+out_firm:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->firmware_rev_attr);
+out_rev:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->revision_attr);
+out_sdrs:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->provides_dev_sdrs_attr);
+out_devid:
+       device_remove_file(&bmc->dev->dev,
+                          &bmc->device_id_attr);
+out:
+       return err;
+}
+
 static int ipmi_bmc_register(ipmi_smi_t intf)
 {
        int               rv;
@@ -2051,7 +2130,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
                bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
                bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
 
-
                bmc->revision_attr.attr.name = "revision";
                bmc->revision_attr.attr.owner = THIS_MODULE;
                bmc->revision_attr.attr.mode = S_IRUGO;
@@ -2093,28 +2171,14 @@ static int ipmi_bmc_register(ipmi_smi_t intf)
                bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
                bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
 
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->device_id_attr);
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->provides_dev_sdrs_attr);
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->revision_attr);
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->firmware_rev_attr);
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->version_attr);
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->add_dev_support_attr);
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->manufacturer_id_attr);
-               device_create_file(&bmc->dev->dev,
-                                  &bmc->product_id_attr);
-               if (bmc->id.aux_firmware_revision_set)
-                       device_create_file(&bmc->dev->dev,
-                                          &bmc->aux_firmware_rev_attr);
-               if (bmc->guid_set)
-                       device_create_file(&bmc->dev->dev,
-                                          &bmc->guid_attr);
+               rv = create_files(bmc);
+               if (rv) {
+                       mutex_lock(&ipmidriver_mutex);
+                       platform_device_unregister(bmc->dev);
+                       mutex_unlock(&ipmidriver_mutex);
+
+                       return rv;
+               }
 
                printk(KERN_INFO
                       "ipmi: Found new BMC (man_id: 0x%6.6x, "
index 6511012cbdcd86d971a6cdebcf6e1baad66ccded..55473371b7c6934abe8486af6fc3055701187140 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bootmem.h>
 #include <linux/pipe_fs_i.h>
+#include <linux/pfn.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -292,8 +293,8 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 {
        unsigned long pfn;
 
-       /* Turn a kernel-virtual address into a physical page frame */
-       pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+       /* Turn a pfn offset into an absolute pfn */
+       pfn = PFN_DOWN(virt_to_phys((void *)PAGE_OFFSET)) + vma->vm_pgoff;
 
        /*
         * RED-PEN: on some architectures there is more mapped memory
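
The mmap_kmem() fix replaces a round-trip through an address with direct pfn arithmetic: vm_pgoff is already a page-frame offset, so the right base is the pfn of PAGE_OFFSET; shifting vm_pgoff into an address first can overflow or alias on machines where kernel memory does not start at physical zero. Kernel-style sketch of the corrected math:

    #include <linux/pfn.h>
    #include <asm/page.h>                   /* PAGE_OFFSET */
    #include <asm/io.h>                     /* virt_to_phys() */

    static unsigned long kmem_base_pfn(unsigned long vm_pgoff)
    {
            return PFN_DOWN(virt_to_phys((void *)PAGE_OFFSET)) + vm_pgoff;
    }
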
index 052e8120a4713ccc2008e8f2f88988bc1adb0985..7ce77619707cf2f4fa1db2480d1acaebaf968ca9 100644 (file)
@@ -662,7 +662,7 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
                        p->RIOError.Error = COPYIN_FAILED;
                        return -EFAULT;
                }
-               if (portStats.port >= RIO_PORTS) {
+               if (portStats.port < 0 || portStats.port >= RIO_PORTS) {
                        p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
                        return -ENXIO;
                }
@@ -702,7 +702,7 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
                        p->RIOError.Error = COPYIN_FAILED;
                        return -EFAULT;
                }
-               if (portStats.port >= RIO_PORTS) {
+               if (portStats.port < 0 || portStats.port >= RIO_PORTS) {
                        p->RIOError.Error = PORT_NUMBER_OUT_OF_RANGE;
                        return -ENXIO;
                }
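
Both rio_linux hunks close the same hole: the port number arrives from userspace as a signed integer, so checking only the upper bound lets negative values index backwards into kernel memory. Compilable reduction:

    #include <stdio.h>

    #define RIO_PORTS 512

    static int port_valid(int port)
    {
            return port >= 0 && port < RIO_PORTS;   /* both bounds, not one */
    }

    int main(void)
    {
            printf("%d %d %d\n", port_valid(-1), port_valid(0),
                   port_valid(RIO_PORTS));          /* prints: 0 1 0 */
            return 0;
    }
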
index 461bfe0234c932a615ae56781cade91a161a0948..3af7f0958c5d85a6a49a96ca8562eb2e31e122d3 100644 (file)
@@ -839,7 +839,7 @@ shutdown(struct cyclades_port * info)
     local_irq_save(flags);
        if (info->xmit_buf){
            free_page((unsigned long) info->xmit_buf);
-           info->xmit_buf = 0;
+           info->xmit_buf = NULL;
        }
 
        base_addr[CyCAR] = (u_char)channel;
@@ -1354,7 +1354,7 @@ cy_unthrottle(struct tty_struct * tty)
 
 static int
 get_serial_info(struct cyclades_port * info,
-                           struct serial_struct * retinfo)
+                           struct serial_struct __user * retinfo)
 {
   struct serial_struct tmp;
 
@@ -1376,7 +1376,7 @@ get_serial_info(struct cyclades_port * info,
 
 static int
 set_serial_info(struct cyclades_port * info,
-                           struct serial_struct * new_info)
+                           struct serial_struct __user * new_info)
 {
   struct serial_struct new_serial;
   struct cyclades_port old_info;
@@ -1503,7 +1503,7 @@ send_break( struct cyclades_port * info, int duration)
 } /* send_break */
 
 static int
-get_mon_info(struct cyclades_port * info, struct cyclades_monitor * mon)
+get_mon_info(struct cyclades_port * info, struct cyclades_monitor __user * mon)
 {
 
    if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor)))
@@ -1516,7 +1516,7 @@ get_mon_info(struct cyclades_port * info, struct cyclades_monitor * mon)
 }
 
 static int
-set_threshold(struct cyclades_port * info, unsigned long *arg)
+set_threshold(struct cyclades_port * info, unsigned long __user *arg)
 {
    volatile unsigned char *base_addr = (u_char *)BASE_ADDR;
    unsigned long value;
@@ -1533,7 +1533,7 @@ set_threshold(struct cyclades_port * info, unsigned long *arg)
 }
 
 static int
-get_threshold(struct cyclades_port * info, unsigned long *value)
+get_threshold(struct cyclades_port * info, unsigned long __user *value)
 {
    volatile unsigned char *base_addr = (u_char *)BASE_ADDR;
    int channel;
@@ -1546,7 +1546,7 @@ get_threshold(struct cyclades_port * info, unsigned long *value)
 }
 
 static int
-set_default_threshold(struct cyclades_port * info, unsigned long *arg)
+set_default_threshold(struct cyclades_port * info, unsigned long __user *arg)
 {
    unsigned long value;
 
@@ -1558,13 +1558,13 @@ set_default_threshold(struct cyclades_port * info, unsigned long *arg)
 }
 
 static int
-get_default_threshold(struct cyclades_port * info, unsigned long *value)
+get_default_threshold(struct cyclades_port * info, unsigned long __user *value)
 {
    return put_user(info->default_threshold,value);
 }
 
 static int
-set_timeout(struct cyclades_port * info, unsigned long *arg)
+set_timeout(struct cyclades_port * info, unsigned long __user *arg)
 {
    volatile unsigned char *base_addr = (u_char *)BASE_ADDR;
    int channel;
@@ -1581,7 +1581,7 @@ set_timeout(struct cyclades_port * info, unsigned long *arg)
 }
 
 static int
-get_timeout(struct cyclades_port * info, unsigned long *value)
+get_timeout(struct cyclades_port * info, unsigned long __user *value)
 {
    volatile unsigned char *base_addr = (u_char *)BASE_ADDR;
    int channel;
@@ -1601,7 +1601,7 @@ set_default_timeout(struct cyclades_port * info, unsigned long value)
 }
 
 static int
-get_default_timeout(struct cyclades_port * info, unsigned long *value)
+get_default_timeout(struct cyclades_port * info, unsigned long __user *value)
 {
    return put_user(info->default_timeout,value);
 }
@@ -1613,6 +1613,7 @@ cy_ioctl(struct tty_struct *tty, struct file * file,
   unsigned long val;
   struct cyclades_port * info = (struct cyclades_port *)tty->driver_data;
   int ret_val = 0;
+  void __user *argp = (void __user *)arg;
 
 #ifdef SERIAL_DEBUG_OTHER
     printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */
@@ -1620,28 +1621,28 @@ cy_ioctl(struct tty_struct *tty, struct file * file,
 
     switch (cmd) {
         case CYGETMON:
-            ret_val = get_mon_info(info, (struct cyclades_monitor *)arg);
+            ret_val = get_mon_info(info, argp);
            break;
         case CYGETTHRESH:
-           ret_val = get_threshold(info, (unsigned long *)arg);
+           ret_val = get_threshold(info, argp);
            break;
         case CYSETTHRESH:
-            ret_val = set_threshold(info, (unsigned long *)arg);
+            ret_val = set_threshold(info, argp);
            break;
         case CYGETDEFTHRESH:
-           ret_val = get_default_threshold(info, (unsigned long *)arg);
+           ret_val = get_default_threshold(info, argp);
            break;
         case CYSETDEFTHRESH:
-            ret_val = set_default_threshold(info, (unsigned long *)arg);
+            ret_val = set_default_threshold(info, argp);
            break;
         case CYGETTIMEOUT:
-           ret_val = get_timeout(info, (unsigned long *)arg);
+           ret_val = get_timeout(info, argp);
            break;
         case CYSETTIMEOUT:
-            ret_val = set_timeout(info, (unsigned long *)arg);
+            ret_val = set_timeout(info, argp);
            break;
         case CYGETDEFTIMEOUT:
-           ret_val = get_default_timeout(info, (unsigned long *)arg);
+           ret_val = get_default_timeout(info, argp);
            break;
         case CYSETDEFTIMEOUT:
             ret_val = set_default_timeout(info, (unsigned long)arg);
@@ -1664,21 +1665,20 @@ cy_ioctl(struct tty_struct *tty, struct file * file,
 
 /* The following commands are incompletely implemented!!! */
         case TIOCGSOFTCAR:
-            ret_val = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long *) arg);
+            ret_val = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp);
             break;
         case TIOCSSOFTCAR:
-            ret_val = get_user(val, (unsigned long *) arg);
+            ret_val = get_user(val, (unsigned long __user *) argp);
            if (ret_val)
                    break;
             tty->termios->c_cflag =
                     ((tty->termios->c_cflag & ~CLOCAL) | (val ? CLOCAL : 0));
             break;
         case TIOCGSERIAL:
-            ret_val = get_serial_info(info, (struct serial_struct *) arg);
+            ret_val = get_serial_info(info, argp);
             break;
         case TIOCSSERIAL:
-            ret_val = set_serial_info(info,
-                                   (struct serial_struct *) arg);
+            ret_val = set_serial_info(info, argp);
             break;
         default:
            ret_val = -ENOIOCTLCMD;
@@ -1773,7 +1773,7 @@ cy_close(struct tty_struct * tty, struct file * filp)
        tty->driver->flush_buffer(tty);
     tty_ldisc_flush(tty);
     info->event = 0;
-    info->tty = 0;
+    info->tty = NULL;
     if (info->blocked_open) {
        if (info->close_delay) {
            msleep_interruptible(jiffies_to_msecs(info->close_delay));
@@ -2250,7 +2250,7 @@ scrn[1] = '\0';
                info->card = index;
                info->line = port_num;
                info->flags = STD_COM_FLAGS;
-               info->tty = 0;
+               info->tty = NULL;
                info->xmit_fifo_size = 12;
                info->cor1 = CyPARITY_NONE|Cy_8_BITS;
                info->cor2 = CyETC;
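
The cyclades hunks are sparse-annotation work: every pointer that originates in user space is typed __user and is only touched through copy_to_user()/copy_from_user()/get_user()/put_user(), and cy_ioctl() converts the raw argument once into `void __user *argp` so the casts disappear from the case labels. A minimal sketch of the same pattern, with a made-up payload struct and ioctl name (foo_info and CYGETFOO are illustrative only, not from the driver):

	struct foo_info { int threshold; };		/* hypothetical payload */

	static int get_foo_info(struct cyclades_port *info,
				struct foo_info __user *p)
	{
		struct foo_info tmp = { .threshold = info->default_threshold };

		if (copy_to_user(p, &tmp, sizeof(tmp)))
			return -EFAULT;		/* user buffer faulted */
		return 0;
	}

	/* in the ioctl switch: argp converts implicitly to the typed pointer */
	case CYGETFOO:
		ret_val = get_foo_info(info, argp);
		break;

With the annotations in place, `make C=1` (sparse) flags any direct dereference of a __user pointer as an address-space violation.
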
index d0b88d0e87fdd5f72f5b6d992efafafadf877ea5..7e1bd9562c2ac3c2e63b40fb50dc92019b5231f8 100644 (file)
@@ -183,11 +183,6 @@ static int sx_poll = HZ;
 
 static struct tty_driver *specialix_driver;
 
-static unsigned long baud_table[] =  {
-       0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-       9600, 19200, 38400, 57600, 115200, 0,
-};
-
 static struct specialix_board sx_board[SX_NBOARD] =  {
        { 0, SX_IOBASE1,  9, },
        { 0, SX_IOBASE2, 11, },
@@ -1090,9 +1085,9 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
 
        if (baud == 38400) {
                if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-                       baud ++;
+                       baud = 57600;
                if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-                       baud += 2;
+                       baud = 115200;
        }
 
        if (!baud) {
@@ -1150,11 +1145,9 @@ static void sx_change_speed(struct specialix_board *bp, struct specialix_port *p
        sx_out(bp, CD186x_RBPRL, tmp & 0xff);
        sx_out(bp, CD186x_TBPRL, tmp & 0xff);
        spin_unlock_irqrestore(&bp->lock, flags);
-       if (port->custom_divisor) {
+       if (port->custom_divisor)
                baud = (SX_OSCFREQ + port->custom_divisor/2) / port->custom_divisor;
-               baud = ( baud + 5 ) / 10;
-       } else
-               baud = (baud_table[baud] + 5) / 10;   /* Estimated CPS */
+       baud = (baud + 5) / 10;         /* Estimated CPS */
 
        /* Two timer ticks seems enough to wakeup something like SLIP driver */
        tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO;
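
With baud_table[] gone, `baud` holds a real bit rate on every path through sx_change_speed(), so the characters-per-second estimate is always rate/10 (one start, eight data and one stop bit per character), rounded to nearest. Worked through for the fastest rate, assuming HZ=100:

	/* 115200 bit/s -> (115200 + 5) / 10 = 11520 chars/s; two timer
	 * ticks then hold about 230 characters, which sets the wakeup
	 * threshold relative to the CD186x FIFO depth. */
	baud = (baud + 5) / 10;				/* Estimated CPS */
	tmp = ((baud + HZ/2) / HZ) * 2 - CD186x_NFIFO;
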
index 5fec626598cd26b117a4518761ea40abc19be053..cc10af08cb059b7f587586518011613541bf028b 100644 (file)
@@ -2602,7 +2602,7 @@ static void __exit sx_exit (void)
                }
        }
        if (misc_deregister(&sx_fw_device) < 0) {
-               printk (KERN_INFO "sx: couldn't deregister firmware loader devic\n");
+               printk (KERN_INFO "sx: couldn't deregister firmware loader device\n");
        }
        sx_dprintk (SX_DEBUG_CLEANUP, "Cleaning up drivers (%d)\n", sx_initialized);
        if (sx_initialized)
index f2864cc64240d36ad637755bbd30cfdbfb31cfd9..06784adcc35c78f7f17650aa69722359b11129a2 100644 (file)
@@ -133,8 +133,8 @@ static MGSL_PARAMS default_params = {
 };
 
 #define SHARED_MEM_ADDRESS_SIZE 0x40000
-#define BUFFERLISTSIZE (PAGE_SIZE)
-#define DMABUFFERSIZE (PAGE_SIZE)
+#define BUFFERLISTSIZE 4096
+#define DMABUFFERSIZE 4096
 #define MAXRXFRAMES 7
 
 typedef struct _DMABUFFERENTRY
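
Pinning BUFFERLISTSIZE and DMABUFFERSIZE at a literal 4096 reads as a statement that the adapter's shared-memory layout is defined in fixed 4 KiB units: on an 8 K- or 64 K-page architecture, PAGE_SIZE-derived buffers would no longer match the device's map or fit the 0x40000-byte window. A compile-time guard in that spirit (hypothetical, not part of the patch) would pin the assumption down:

	static inline void shared_mem_layout_check(void)
	{
		/* the buffer list plus the receive DMA buffers must fit */
		BUILD_BUG_ON(BUFFERLISTSIZE + MAXRXFRAMES * DMABUFFERSIZE
			     > SHARED_MEM_ADDRESS_SIZE);
	}
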
index a082a2e342522a78edfd642f272784bd1fbee276..6ad2d3bb945c06fc2dc86d92eaa8d2fa410c0308 100644 (file)
@@ -1153,7 +1153,14 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
 
        spin_unlock(&driver_lock);
 
-       sysfs_create_group(&dev->kobj, chip->vendor.attr_group);
+       if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
+               list_del(&chip->list);
+               put_device(dev);
+               clear_bit(chip->dev_num, dev_mask);
+               kfree(chip);
+               kfree(devname);
+               return NULL;
+       }
 
        chip->bios_dir = tpm_bios_log_setup(devname);
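
tpm_register_hardware now checks sysfs_create_group(), which can fail (for instance under memory pressure) and returns a negative errno that newer kernels mark __must_check; on failure the function unwinds everything built so far, in reverse order, and signals the caller with NULL, a value callers of this function already have to handle. In outline:

	if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
		/* reverse order of construction */
		list_del(&chip->list);
		put_device(dev);
		clear_bit(chip->dev_num, dev_mask);
		kfree(chip);
		return NULL;
	}
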
 
index ad8ffe49256f75990e45a535b534851133eb8526..1ab0896070be2fa7cbc97006de065f5b27ef85dc 100644 (file)
@@ -184,7 +184,9 @@ static int __init init_atmel(void)
        unsigned long base;
        struct  tpm_chip *chip;
 
-       driver_register(&atml_drv);
+       rc = driver_register(&atml_drv);
+       if (rc)
+               return rc;
 
        if ((iobase = atmel_get_base_addr(&base, &region_size)) == NULL) {
                rc = -ENODEV;
@@ -195,10 +197,8 @@ static int __init init_atmel(void)
            (atmel_request_region
             (tpm_atmel.base, region_size, "tpm_atmel0") == NULL) ? 0 : 1;
 
-
-       if (IS_ERR
-           (pdev =
-            platform_device_register_simple("tpm_atmel", -1, NULL, 0))) {
+       pdev = platform_device_register_simple("tpm_atmel", -1, NULL, 0);
+       if (IS_ERR(pdev)) {
                rc = PTR_ERR(pdev);
                goto err_rel_reg;
        }
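
driver_register() returns a negative errno when registration with the bus fails, and ignoring it left the TPM modules half-initialized; both this driver and tpm_nsc below now propagate the error before touching any hardware. The idiom, with a placeholder driver name:

	static int __init init_foo(void)	/* "foo" is a placeholder */
	{
		int rc = driver_register(&foo_drv);

		if (rc)
			return rc;	/* nothing to unwind this early */
		/* ... probing; later failures must driver_unregister() ... */
		return 0;
	}
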
index 26287aace87db5376bd7a48b18d37ba03564937f..608f73071bef91acbdbcb93d6bf17a7527d9a26a 100644 (file)
@@ -284,7 +284,7 @@ static struct device_driver nsc_drv = {
 static int __init init_nsc(void)
 {
        int rc = 0;
-       int lo, hi;
+       int lo, hi, err;
        int nscAddrBase = TPM_ADDR;
        struct tpm_chip *chip;
        unsigned long base;
@@ -297,7 +297,9 @@ static int __init init_nsc(void)
                        return -ENODEV;
        }
 
-       driver_register(&nsc_drv);
+       err = driver_register(&nsc_drv);
+       if (err)
+               return err;
 
        hi = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_HI);
        lo = tpm_read_index(nscAddrBase, TPM_NSC_BASE0_LO);
index 3a365e159d89d52df59b945d22bb7bd1408022f9..d944647c82c2e0dfc771fb286a3554569e0f487c 100644 (file)
@@ -226,14 +226,26 @@ static int __init eisa_init_device (struct eisa_root_device *root,
 
 static int __init eisa_register_device (struct eisa_device *edev)
 {
-       if (device_register (&edev->dev))
-               return -1;
+       int rc = device_register (&edev->dev);
+       if (rc)
+               return rc;
 
-       device_create_file (&edev->dev, &dev_attr_signature);
-       device_create_file (&edev->dev, &dev_attr_enabled);
-       device_create_file (&edev->dev, &dev_attr_modalias);
+       rc = device_create_file (&edev->dev, &dev_attr_signature);
+       if (rc) goto err_devreg;
+       rc = device_create_file (&edev->dev, &dev_attr_enabled);
+       if (rc) goto err_sig;
+       rc = device_create_file (&edev->dev, &dev_attr_modalias);
+       if (rc) goto err_enab;
 
        return 0;
+
+err_enab:
+       device_remove_file (&edev->dev, &dev_attr_enabled);
+err_sig:
+       device_remove_file (&edev->dev, &dev_attr_signature);
+err_devreg:
+       device_unregister(&edev->dev);
+       return rc;
 }
 
 static int __init eisa_request_resources (struct eisa_root_device *root,
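
The eisa fix is the canonical goto unwind ladder: each device_create_file() is checked, and a failure jumps to a label that releases, in reverse order, exactly what was already acquired. A self-contained illustration with stub acquire/release steps (all names are placeholders):

	static int acquire_a(void) { return 0; }
	static int acquire_b(void) { return 0; }
	static int acquire_c(void) { return -1; }	/* pretend the last step fails */
	static void release_a(void) { }
	static void release_b(void) { }

	static int setup(void)
	{
		int rc;

		if ((rc = acquire_a()))
			return rc;		/* nothing to undo yet */
		if ((rc = acquire_b()))
			goto err_a;
		if ((rc = acquire_c()))
			goto err_b;
		return 0;

	err_b:
		release_b();
	err_a:
		release_a();
		return rc;			/* first failure's errno */
	}

The same shape appears below in dell_rbu, led-class and mca-bus; only the acquire/release pairs differ.
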
index fc17599c905e001f055cec284d52bd3e1f86a25f..08b16179844308ab80b4406638ff43927156cf3d 100644 (file)
@@ -249,7 +249,7 @@ static int packetize_data(void *data, size_t length)
                if ((rc = create_packet(temp, packet_length)))
                        return rc;
 
-               pr_debug("%p:%lu\n", temp, (end - temp));
+               pr_debug("%p:%td\n", temp, (end - temp));
                temp += packet_length;
        }
 
@@ -718,14 +718,27 @@ static int __init dcdrbu_init(void)
                return -EIO;
        }
 
-       sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
-       sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
-       sysfs_create_bin_file(&rbu_device->dev.kobj,
+       rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
+       if (rc)
+               goto out_devreg;
+       rc = sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
+       if (rc)
+               goto out_data;
+       rc = sysfs_create_bin_file(&rbu_device->dev.kobj,
                &rbu_packet_size_attr);
+       if (rc)
+               goto out_imtype;
 
        rbu_data.entry_created = 0;
-       return rc;
+       return 0;
 
+out_imtype:
+       sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
+out_data:
+       sysfs_remove_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
+out_devreg:
+       platform_device_unregister(rbu_device);
+       return rc;
 }
 
 static __exit void dcdrbu_exit(void)
index 8ebce1c03ad77f887038924590ecfc009f782b1d..5ab5e393b882810556d80c6c7fa5071e444362b3 100644 (file)
@@ -639,7 +639,12 @@ efivar_create_sysfs_entry(unsigned long variable_name_size,
 
        kobject_set_name(&new_efivar->kobj, "%s", short_name);
        kobj_set_kset_s(new_efivar, vars_subsys);
-       kobject_register(&new_efivar->kobj);
+       i = kobject_register(&new_efivar->kobj);
+       if (i) {
+               kfree(short_name);
+               kfree(new_efivar);
+               return 1;
+       }
 
        kfree(short_name);
        short_name = NULL;
index 69bbb6206a00ef5673638c0ccfb4a59bfa272b1d..bddfebdf91d8aea8a14ee0c0de25e0016e74ef63 100644 (file)
@@ -597,7 +597,7 @@ static void cdrom_prepare_request(ide_drive_t *drive, struct request *rq)
        struct cdrom_info *cd = drive->driver_data;
 
        ide_init_drive_cmd(rq);
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->cmd_type = REQ_TYPE_ATA_PC;
        rq->rq_disk = cd->disk;
 }
 
@@ -716,7 +716,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
                ide_error(drive, "request sense failure", stat);
                return 1;
 
-       } else if (blk_pc_request(rq)) {
+       } else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) {
                /* All other functions, except for READ. */
                unsigned long flags;
 
@@ -2023,7 +2023,8 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
                }
                info->last_block = block;
                return action;
-       } else if (rq->cmd_type == REQ_TYPE_SENSE) {
+       } else if (rq->cmd_type == REQ_TYPE_SENSE ||
+                  rq->cmd_type == REQ_TYPE_ATA_PC) {
                return cdrom_do_packet_command(drive);
        } else if (blk_pc_request(rq)) {
                return cdrom_do_block_pc(drive, rq);
index 1d0470c1f9579d262f0e93c2fcebab6dd89ad399..30175c7688e877016e0bd666bcfa4b8ca5e92ad4 100644 (file)
@@ -524,8 +524,8 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
        task_ioreg_t *hobsptr   = args.hobRegister;
        int err                 = 0;
        int tasksize            = sizeof(struct ide_task_request_s);
-       int taskin              = 0;
-       int taskout             = 0;
+       unsigned int taskin     = 0;
+       unsigned int taskout    = 0;
        u8 io_32bit             = drive->io_32bit;
        char __user *buf = (char __user *)arg;
 
@@ -538,8 +538,13 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
                return -EFAULT;
        }
 
-       taskout = (int) req_task->out_size;
-       taskin  = (int) req_task->in_size;
+       taskout = req_task->out_size;
+       taskin  = req_task->in_size;
+       
+       if (taskin > 65536 || taskout > 65536) {
+               err = -EINVAL;
+               goto abort;
+       }
 
        if (taskout) {
                int outtotal = tasksize;
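
ide_taskfile_ioctl previously kept the user-supplied transfer sizes in signed ints; a huge value from user space went negative, slipped past later sanity checks, and ended up sizing kernel buffers and copy lengths. Making the sizes unsigned and capping them at 64 KiB before first use closes that off. The general rule, sketched with a hypothetical request struct:

	unsigned int len = req->out_size;	/* user-controlled */

	if (len > 65536)			/* hard bound before any use */
		return -EINVAL;
	buf = kzalloc(len, GFP_KERNEL);		/* only then allocate */
	if (!buf)
		return -ENOMEM;
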
index 965c43659e35c60e807516d07621b3c254685a01..5b77a5bcbf0c113b7a95f8ffc04bfa302292677a 100644 (file)
@@ -237,10 +237,12 @@ static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_devi
        if (dev->vendor == PCI_VENDOR_ID_JMICRON && PCI_FUNC(dev->devfn) != 1)
                goto out;
 
-       pci_read_config_word(dev, PCI_COMMAND, &command);
-       if (!(command & PCI_COMMAND_IO)) {
-               printk(KERN_INFO "Skipping disabled %s IDE controller.\n", d->name);
-               goto out;
+       if (dev->vendor != PCI_VENDOR_ID_JMICRON) {
+               pci_read_config_word(dev, PCI_COMMAND, &command);
+               if (!(command & PCI_COMMAND_IO)) {
+                       printk(KERN_INFO "Skipping disabled %s IDE controller.\n", d->name);
+                       goto out;
+               }
        }
        ret = ide_setup_pci_device(dev, d);
 out:
index f3fe287fbd89ba6b5d65deb92b42ba1ca9f875e2..244f7eb7006d43ea5f7f691a170e93ff057fc6c0 100644 (file)
@@ -774,7 +774,7 @@ ioc4_ide_exit(void)
        ioc4_unregister_submodule(&ioc4_ide_submodule);
 }
 
-module_init(ioc4_ide_init);
+late_initcall(ioc4_ide_init); /* Call only after IDE init is done */
 module_exit(ioc4_ide_exit);
 
 MODULE_AUTHOR("Aniket Malatpure/Jeremy Higdon");
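
Switching from module_init() to late_initcall() keeps the same function but defers it to the last initcall level, so a built-in ioc4_ide_init can no longer run ahead of the IDE core it registers with; in a modular build the *_initcall() macros degenerate to the module's init entry, so module loading is unaffected. In miniature:

	/* before: device-level initcall, order vs. the IDE core unspecified */
	/* module_init(ioc4_ide_init); */

	/* after: last level when built in; plain module init when modular */
	late_initcall(ioc4_ide_init);
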
index 1be963961c15734bfd923c07c589ab6a1a8f2a47..ab4da79ee560d9279afc989d129715c45dd1593c 100644 (file)
@@ -60,7 +60,7 @@ static struct fasync_struct *hp_sdc_rtc_async_queue;
 
 static DECLARE_WAIT_QUEUE_HEAD(hp_sdc_rtc_wait);
 
-static ssize_t hp_sdc_rtc_read(struct file *file, char *buf,
+static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos);
 
 static int hp_sdc_rtc_ioctl(struct inode *inode, struct file *file,
@@ -385,14 +385,14 @@ static int hp_sdc_rtc_set_i8042timer (struct timeval *setto, uint8_t setcmd)
        return 0;
 }
 
-static ssize_t hp_sdc_rtc_read(struct file *file, char *buf,
+static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
                               size_t count, loff_t *ppos) {
        ssize_t retval;
 
         if (count < sizeof(unsigned long))
                 return -EINVAL;
 
-       retval = put_user(68, (unsigned long *)buf);
+       retval = put_user(68, (unsigned long __user *)buf);
        return retval;
 }
 
@@ -696,7 +696,7 @@ static int __init hp_sdc_rtc_init(void)
        if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr)))
                return ret;
        misc_register(&hp_sdc_rtc_dev);
-        create_proc_read_entry ("driver/rtc", 0, 0, 
+        create_proc_read_entry ("driver/rtc", 0, NULL,
                                hp_sdc_rtc_read_proc, NULL);
 
        printk(KERN_INFO "HP i8042 SDC + MSM-58321 RTC support loaded "
index 4639537336fc116c9eeb62dcbb5741027e342e7b..7b9d1c1da41a7d676737e7adc92c1f1700e1d6a1 100644 (file)
@@ -17,7 +17,7 @@
  * with this program; if not, write to the Free Software Foundation, Inc.,
  * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
  */
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/dmi.h>
 #include <linux/init.h>
 #include <linux/input.h>
index bdfde046b7410b0df4370b92c7913f96fe1c9823..49e11e2c1d5dd24e08c8c0caab91d270e805c19e 100644 (file)
@@ -391,23 +391,23 @@ static int hilse_operate(hil_mlc *mlc, int repoll) {
 }
 
 #define FUNC(funct, funct_arg, zero_rc, neg_rc, pos_rc) \
-{ HILSE_FUNC,          { func: &funct }, funct_arg, zero_rc, neg_rc, pos_rc },
+{ HILSE_FUNC,          { .func = funct }, funct_arg, zero_rc, neg_rc, pos_rc },
 #define OUT(pack) \
-{ HILSE_OUT,           { packet: pack }, 0, HILSEN_NEXT, HILSEN_DOZE, 0 },
+{ HILSE_OUT,           { .packet = pack }, 0, HILSEN_NEXT, HILSEN_DOZE, 0 },
 #define CTS \
-{ HILSE_CTS,           { packet: 0    }, 0, HILSEN_NEXT | HILSEN_SCHED | HILSEN_BREAK, HILSEN_DOZE, 0 },
+{ HILSE_CTS,           { .packet = 0    }, 0, HILSEN_NEXT | HILSEN_SCHED | HILSEN_BREAK, HILSEN_DOZE, 0 },
 #define EXPECT(comp, to, got, got_wrong, timed_out) \
-{ HILSE_EXPECT,                { packet: comp }, to, got, got_wrong, timed_out },
+{ HILSE_EXPECT,                { .packet = comp }, to, got, got_wrong, timed_out },
 #define EXPECT_LAST(comp, to, got, got_wrong, timed_out) \
-{ HILSE_EXPECT_LAST,   { packet: comp }, to, got, got_wrong, timed_out },
+{ HILSE_EXPECT_LAST,   { .packet = comp }, to, got, got_wrong, timed_out },
 #define EXPECT_DISC(comp, to, got, got_wrong, timed_out) \
-{ HILSE_EXPECT_DISC,   { packet: comp }, to, got, got_wrong, timed_out },
+{ HILSE_EXPECT_DISC,   { .packet = comp }, to, got, got_wrong, timed_out },
 #define IN(to, got, got_error, timed_out) \
-{ HILSE_IN,            { packet: 0    }, to, got, got_error, timed_out },
+{ HILSE_IN,            { .packet = 0    }, to, got, got_error, timed_out },
 #define OUT_DISC(pack) \
-{ HILSE_OUT_DISC,      { packet: pack }, 0, 0, 0, 0 },
+{ HILSE_OUT_DISC,      { .packet = pack }, 0, 0, 0, 0 },
 #define OUT_LAST(pack) \
-{ HILSE_OUT_LAST,      { packet: pack }, 0, 0, 0, 0 },
+{ HILSE_OUT_LAST,      { .packet = pack }, 0, 0, 0, 0 },
 
 struct hilse_node hil_mlc_se[HILSEN_END] = {
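
The hil_mlc macro table trades the old GCC-only `field: value` initializer extension for C99 designated initializers, which mean exactly the same thing but are standard and acceptable to sparse and to non-GCC compilers. Side by side, on an illustrative union:

	union act { int packet; void (*func)(int); };	/* illustrative type */

	static void probe(int x) { }

	static union act old_way = { func: probe };	/* GCC extension, deprecated */
	static union act new_way = { .func = probe };	/* C99 equivalent */

Note the macros also stop taking the function's address explicitly; `funct` decays to a pointer either way, so `&funct` and `funct` are interchangeable here.
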
 
index ba7b920347e3f11c5b2e2b889b5c8e4d2987545c..9907ad3bea23252547a6a26c6cfd4d4dafae1269 100644 (file)
@@ -310,7 +310,7 @@ static void hp_sdc_tasklet(unsigned long foo) {
                                 * in tasklet/bh context.
                                 */
                                if (curr->act.irqhook) 
-                                       curr->act.irqhook(0, 0, 0, 0);
+                                       curr->act.irqhook(0, NULL, 0, 0);
                        }
                        curr->actidx = curr->idx;
                        curr->idx++;
@@ -525,7 +525,7 @@ actdone:
                up(curr->act.semaphore);
        }
        else if (act & HP_SDC_ACT_CALLBACK) {
-               curr->act.irqhook(0,0,0,0);
+               curr->act.irqhook(0,NULL,0,0);
        }
        if (curr->idx >= curr->endidx) { /* This transaction is over. */
                if (act & HP_SDC_ACT_DEALLOC) kfree(curr);
index d10c8b82e6aaeac3587361efb531cb92de5ef150..b6f9476c0501d75abd98916098726d070c1436be 100644 (file)
@@ -1907,7 +1907,8 @@ static int if_readstat(u8 __user *buf, int len, int id, int channel)
        }
 
        for (p=buf, count=0; count < len; p++, count++) {
-               put_user(*card->q931_read++, p);
+               if (put_user(*card->q931_read++, p))
+                       return -EFAULT;
                if (card->q931_read > card->q931_end)
                        card->q931_read = card->q931_buf;
        }
index e4823ab2b12702b6085a48cec68ac1ad077c9dbc..785b08554fcaa3685a3115a965be95bccc1d72e1 100644 (file)
@@ -631,7 +631,8 @@ static int HiSax_readstatus(u_char __user *buf, int len, int id, int channel)
                count = cs->status_end - cs->status_read + 1;
                if (count >= len)
                        count = len;
-               copy_to_user(p, cs->status_read, count);
+               if (copy_to_user(p, cs->status_read, count))
+                       return -EFAULT;
                cs->status_read += count;
                if (cs->status_read > cs->status_end)
                        cs->status_read = cs->status_buf;
@@ -642,7 +643,8 @@ static int HiSax_readstatus(u_char __user *buf, int len, int id, int channel)
                                cnt = HISAX_STATUS_BUFSIZE;
                        else
                                cnt = count;
-                       copy_to_user(p, cs->status_read, cnt);
+                       if (copy_to_user(p, cs->status_read, cnt))
+                               return -EFAULT;
                        p += cnt;
                        cs->status_read += cnt % HISAX_STATUS_BUFSIZE;
                        count -= cnt;
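
This hunk and the previous one stop discarding the result of the user-copy helpers: copy_to_user() returns the number of bytes it could not copy and put_user() returns 0 or -EFAULT, so any nonzero result means the destination was unwritable and the driver must report -EFAULT rather than pretend the status bytes arrived. Reduced to its core:

	/* sketch: p is a __user pointer supplied by the read() path */
	if (copy_to_user(p, cs->status_read, count))	/* bytes NOT copied */
		return -EFAULT;
	if (put_user(*card->q931_read++, p))		/* 0 or -EFAULT */
		return -EFAULT;
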
index 160f22fa594176551c8d5d7bc1a04224463f72bf..8bbe33ae06dba0d4259c971facd45b88f138c374 100644 (file)
@@ -45,11 +45,10 @@ ergo_interrupt(int intno, void *dev_id)
        if (!card->irq_enabled)
                return IRQ_NONE;                /* other device interrupting or irq switched off */
 
-       save_flags(flags);
-       cli();                  /* no further irqs allowed */
+       spin_lock_irqsave(&card->hysdn_lock, flags); /* no further irqs allowed */
 
        if (!(bytein(card->iobase + PCI9050_INTR_REG) & PCI9050_INTR_REG_STAT1)) {
-               restore_flags(flags);   /* restore old state */
+               spin_unlock_irqrestore(&card->hysdn_lock, flags);       /* restore old state */
                return IRQ_NONE;                /* no interrupt requested by E1 */
        }
        /* clear any pending ints on the board */
@@ -61,7 +60,7 @@ ergo_interrupt(int intno, void *dev_id)
        /* start kernel task immediately after leaving all interrupts */
        if (!card->hw_lock)
                schedule_work(&card->irq_queue);
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
        return IRQ_HANDLED;
 }                              /* ergo_interrupt */
 
@@ -83,10 +82,9 @@ ergo_irq_bh(hysdn_card * card)
 
        dpr = card->dpram;      /* point to DPRAM */
 
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->hysdn_lock, flags);
        if (card->hw_lock) {
-               restore_flags(flags);   /* hardware currently unavailable */
+               spin_unlock_irqrestore(&card->hysdn_lock, flags);       /* hardware currently unavailable */
                return;
        }
        card->hw_lock = 1;      /* we now lock the hardware */
@@ -120,7 +118,7 @@ ergo_irq_bh(hysdn_card * card)
                        card->hw_lock = 0;      /* free hardware again */
        } while (again);        /* until nothing more to do */
 
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
 }                              /* ergo_irq_bh */
 
 
@@ -137,8 +135,7 @@ ergo_stopcard(hysdn_card * card)
 #ifdef CONFIG_HYSDN_CAPI
        hycapi_capi_stop(card);
 #endif /* CONFIG_HYSDN_CAPI */
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->hysdn_lock, flags);
        val = bytein(card->iobase + PCI9050_INTR_REG);  /* get actual value */
        val &= ~(PCI9050_INTR_REG_ENPCI | PCI9050_INTR_REG_EN1);        /* mask irq */
        byteout(card->iobase + PCI9050_INTR_REG, val);
@@ -147,7 +144,7 @@ ergo_stopcard(hysdn_card * card)
        card->state = CARD_STATE_UNUSED;
        card->err_log_state = ERRLOG_STATE_OFF;         /* currently no log active */
 
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
 }                              /* ergo_stopcard */
 
 /**************************************************************************/
@@ -162,12 +159,11 @@ ergo_set_errlog_state(hysdn_card * card, int on)
                card->err_log_state = ERRLOG_STATE_OFF;         /* must be off */
                return;
        }
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->hysdn_lock, flags);
 
        if (((card->err_log_state == ERRLOG_STATE_OFF) && !on) ||
            ((card->err_log_state == ERRLOG_STATE_ON) && on)) {
-               restore_flags(flags);
+               spin_unlock_irqrestore(&card->hysdn_lock, flags);
                return;         /* nothing to do */
        }
        if (on)
@@ -175,7 +171,7 @@ ergo_set_errlog_state(hysdn_card * card, int on)
        else
                card->err_log_state = ERRLOG_STATE_STOP;        /* request stop */
 
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
        schedule_work(&card->irq_queue);
 }                              /* ergo_set_errlog_state */
 
@@ -356,8 +352,7 @@ ergo_waitpofready(struct HYSDN_CARD *card)
 
                        if (card->debug_flags & LOG_POF_RECORD)
                                hysdn_addlog(card, "ERGO: pof boot success");
-                       save_flags(flags);
-                       cli();
+                       spin_lock_irqsave(&card->hysdn_lock, flags);
 
                        card->state = CARD_STATE_RUN;   /* now card is running */
                        /* enable the cards interrupt */
@@ -370,7 +365,7 @@ ergo_waitpofready(struct HYSDN_CARD *card)
                        dpr->ToHyInt = 1;
                        dpr->ToPcInt = 1;       /* interrupt to E1 for all cards */
 
-                       restore_flags(flags);
+                       spin_unlock_irqrestore(&card->hysdn_lock, flags);
                        if ((hynet_enable & (1 << card->myid)) 
                            && (i = hysdn_net_create(card))) 
                        {
@@ -448,6 +443,7 @@ ergo_inithardware(hysdn_card * card)
        card->waitpofready = ergo_waitpofready;
        card->set_errlog_state = ergo_set_errlog_state;
        INIT_WORK(&card->irq_queue, (void *) (void *) ergo_irq_bh, card);
+       spin_lock_init(&card->hysdn_lock);
 
        return (0);
 }                              /* ergo_inithardware */
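
The hysdn conversion replaces save_flags()/cli(), which only masked interrupts on the local CPU and offered no exclusion against another processor, with a per-card spinlock; spin_lock_irqsave() gives the same local interrupt protection plus real SMP mutual exclusion against ergo_interrupt(). The mechanical translation used throughout the file:

	unsigned long flags;

	spin_lock_irqsave(&card->hysdn_lock, flags);	/* was: save_flags(flags); cli(); */
	/* ... state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&card->hysdn_lock, flags); /* was: restore_flags(flags); */

The lock is per card, so independent boards never serialize against each other; it is set up once in ergo_inithardware(), where spin_lock_init() is the form lockdep expects.
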
index 461e831592ddd28710c36c8d422ae0f286645445..729df40893857789284f831afe43fe4bf0a943de 100644 (file)
@@ -188,6 +188,8 @@ typedef struct HYSDN_CARD {
        /* init and deinit stopcard for booting, too */
        void (*stopcard) (struct HYSDN_CARD *);
        void (*releasehardware) (struct HYSDN_CARD *);
+
+       spinlock_t hysdn_lock;
 #ifdef CONFIG_HYSDN_CAPI
        struct hycapictrl_info {
                char cardname[32];
index c4301e8338eff58a4a01fc7f2120fbfeefe5dd3a..fcd49920b2203ab9d33359a204e1c35f082337bd 100644 (file)
@@ -116,8 +116,7 @@ put_log_buffer(hysdn_card * card, char *cp)
        strcpy(ib->log_start, cp);      /* set output string */
        ib->next = NULL;
        ib->proc_ctrl = pd;     /* point to own control structure */
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->hysdn_lock, flags);
        ib->usage_cnt = pd->if_used;
        if (!pd->log_head)
                pd->log_head = ib;      /* new head */
@@ -125,7 +124,7 @@ put_log_buffer(hysdn_card * card, char *cp)
                pd->log_tail->next = ib;        /* follows existing messages */
        pd->log_tail = ib;      /* new tail */
        i = pd->del_lock++;     /* get lock state */
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
 
        /* delete old entrys */
        if (!i)
@@ -270,14 +269,13 @@ hysdn_log_open(struct inode *ino, struct file *filep)
        } else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
 
                /* read access -> log/debug read */
-               save_flags(flags);
-               cli();
+               spin_lock_irqsave(&card->hysdn_lock, flags);
                pd->if_used++;
                if (pd->log_head)
                        filep->private_data = &pd->log_tail->next;
                else
                        filep->private_data = &pd->log_head;
-               restore_flags(flags);
+               spin_unlock_irqrestore(&card->hysdn_lock, flags);
        } else {                /* simultaneous read/write access forbidden ! */
                unlock_kernel();
                return (-EPERM);        /* no permission this time */
@@ -301,7 +299,7 @@ hysdn_log_close(struct inode *ino, struct file *filep)
        hysdn_card *card;
        int retval = 0;
        unsigned long flags;
-
+       spinlock_t hysdn_lock = SPIN_LOCK_UNLOCKED;
 
        lock_kernel();
        if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
@@ -311,8 +309,7 @@ hysdn_log_close(struct inode *ino, struct file *filep)
                /* read access -> log/debug read, mark one further file as closed */
 
                pd = NULL;
-               save_flags(flags);
-               cli();
+               spin_lock_irqsave(&hysdn_lock, flags);
                inf = *((struct log_data **) filep->private_data);      /* get first log entry */
                if (inf)
                        pd = (struct procdata *) inf->proc_ctrl;        /* still entries there */
@@ -335,7 +332,7 @@ hysdn_log_close(struct inode *ino, struct file *filep)
                        inf->usage_cnt--;       /* decrement usage count for buffers */
                        inf = inf->next;
                }
-               restore_flags(flags);
+               spin_unlock_irqrestore(&hysdn_lock, flags);
 
                if (pd)
                        if (pd->if_used <= 0)   /* delete buffers if last file closed */
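
One caveat in hysdn_log_close() above: the spinlock it takes is declared on the function's own stack, so every caller locks a private, never-contended lock and the usage-count walk is not actually serialized against put_log_buffer(), which takes the per-card lock. For the exclusion to mean anything, all parties must use the same lock object living in shared data, along these lines (a sketch; resolving `card` in this function is left aside):

	spin_lock_irqsave(&card->hysdn_lock, flags);	/* the writers' lock */
	/* ... walk pd->log_head and adjust usage_cnt / if_used ... */
	spin_unlock_irqrestore(&card->hysdn_lock, flags);
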
index 1c0d54ac12abeb8cb2686ea87adea4619b51e24f..1fadf0133e9b6355e4e994b03e6f795d955bc130 100644 (file)
@@ -155,8 +155,7 @@ hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
        if (card->debug_flags & LOG_SCHED_ASYN)
                hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1);
 
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->hysdn_lock, flags);
        while (card->async_busy) {
                sti();
 
@@ -165,7 +164,7 @@ hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
 
                msleep_interruptible(20);               /* Timeout 20ms */
                if (!--cnt) {
-                       restore_flags(flags);
+                       spin_unlock_irqrestore(&card->hysdn_lock, flags);
                        return (-ERR_ASYNC_TIME);       /* timed out */
                }
                cli();
@@ -194,13 +193,13 @@ hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan)
 
                msleep_interruptible(20);               /* Timeout 20ms */
                if (!--cnt) {
-                       restore_flags(flags);
+                       spin_unlock_irqrestore(&card->hysdn_lock, flags);
                        return (-ERR_ASYNC_TIME);       /* timed out */
                }
                cli();
        }                       /* wait for buffer to become free again */
 
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->hysdn_lock, flags);
 
        if (card->debug_flags & LOG_SCHED_ASYN)
                hysdn_addlog(card, "async tx-cfg data send");
index c3d79eef9e3245ef0f8e8a28b015dd2bb8db39d7..69aee2602aa62aac22a3c6e41442a4d1810a7ee5 100644 (file)
@@ -1134,9 +1134,12 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t * off)
                if (dev->drv[drvidx]->interface->readstat) {
                        if (count > dev->drv[drvidx]->stavail)
                                count = dev->drv[drvidx]->stavail;
-                       len = dev->drv[drvidx]->interface->
-                               readstat(buf, count, drvidx,
-                                        isdn_minor2chan(minor));
+                       len = dev->drv[drvidx]->interface->readstat(buf, count,
+                                               drvidx, isdn_minor2chan(minor));
+                       if (len < 0) {
+                               retval = len;
+                               goto out;
+                       }
                } else {
                        len = 0;
                }
index 6649f8bc99512247cdbfb6142dbfcce40f0669b0..730bbd07ebc7ca2e842e5cde6080fdd466203027 100644 (file)
@@ -1010,7 +1010,8 @@ icn_readstatus(u_char __user *buf, int len, icn_card * card)
        for (p = buf, count = 0; count < len; p++, count++) {
                if (card->msg_buf_read == card->msg_buf_write)
                        return count;
-               put_user(*card->msg_buf_read++, p);
+               if (put_user(*card->msg_buf_read++, p))
+                       return -EFAULT;
                if (card->msg_buf_read > card->msg_buf_end)
                        card->msg_buf_read = card->msg_buf;
        }
index fabbd461603e217befac34687aa5eb08f7ebb575..c3ae2edaf6fa5812cf039ef00ef8e7b467aed901 100644 (file)
@@ -100,12 +100,11 @@ isdnloop_pollbchan(unsigned long data)
                isdnloop_bchan_send(card, 1);
        if (card->flags & (ISDNLOOP_FLAGS_B1ACTIVE | ISDNLOOP_FLAGS_B2ACTIVE)) {
                /* schedule b-channel polling again */
-               save_flags(flags);
-               cli();
+               spin_lock_irqsave(&card->isdnloop_lock, flags);
                card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
                add_timer(&card->rb_timer);
                card->flags |= ISDNLOOP_FLAGS_RBTIMER;
-               restore_flags(flags);
+               spin_unlock_irqrestore(&card->isdnloop_lock, flags);
        } else
                card->flags &= ~ISDNLOOP_FLAGS_RBTIMER;
 }
@@ -281,8 +280,7 @@ isdnloop_putmsg(isdnloop_card * card, unsigned char c)
 {
        ulong flags;
 
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
        *card->msg_buf_write++ = (c == 0xff) ? '\n' : c;
        if (card->msg_buf_write == card->msg_buf_read) {
                if (++card->msg_buf_read > card->msg_buf_end)
@@ -290,7 +288,7 @@ isdnloop_putmsg(isdnloop_card * card, unsigned char c)
        }
        if (card->msg_buf_write > card->msg_buf_end)
                card->msg_buf_write = card->msg_buf;
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
 }
 
 /*
@@ -372,21 +370,19 @@ isdnloop_polldchan(unsigned long data)
                if (!(card->flags & ISDNLOOP_FLAGS_RBTIMER)) {
                        /* schedule b-channel polling */
                        card->flags |= ISDNLOOP_FLAGS_RBTIMER;
-                       save_flags(flags);
-                       cli();
+                       spin_lock_irqsave(&card->isdnloop_lock, flags);
                        del_timer(&card->rb_timer);
                        card->rb_timer.function = isdnloop_pollbchan;
                        card->rb_timer.data = (unsigned long) card;
                        card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
                        add_timer(&card->rb_timer);
-                       restore_flags(flags);
+                       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                }
        /* schedule again */
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
        card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
        add_timer(&card->st_timer);
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
 }
 
 /*
@@ -416,8 +412,7 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
                        return 0;
                if (card->sndcount[channel] > ISDNLOOP_MAX_SQUEUE)
                        return 0;
-               save_flags(flags);
-               cli();
+               spin_lock_irqsave(&card->isdnloop_lock, flags);
                nskb = dev_alloc_skb(skb->len);
                if (nskb) {
                        memcpy(skb_put(nskb, len), skb->data, len);
@@ -426,7 +421,7 @@ isdnloop_sendbuf(int channel, struct sk_buff *skb, isdnloop_card * card)
                } else
                        len = 0;
                card->sndcount[channel] += len;
-               restore_flags(flags);
+               spin_unlock_irqrestore(&card->isdnloop_lock, flags);
        }
        return len;
 }
@@ -451,7 +446,8 @@ isdnloop_readstatus(u_char __user *buf, int len, isdnloop_card * card)
        for (p = buf, count = 0; count < len; p++, count++) {
                if (card->msg_buf_read == card->msg_buf_write)
                        return count;
-               put_user(*card->msg_buf_read++, p);
+               if (put_user(*card->msg_buf_read++, p))
+                       return -EFAULT;
                if (card->msg_buf_read > card->msg_buf_end)
                        card->msg_buf_read = card->msg_buf;
        }
@@ -576,8 +572,7 @@ isdnloop_atimeout(isdnloop_card * card, int ch)
        unsigned long flags;
        char buf[60];
 
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
        if (card->rcard) {
                isdnloop_fake(card->rcard[ch], "DDIS_I", card->rch[ch] + 1);
                card->rcard[ch]->rcard[card->rch[ch]] = NULL;
@@ -587,7 +582,7 @@ isdnloop_atimeout(isdnloop_card * card, int ch)
        /* No user responding */
        sprintf(buf, "CAU%s", isdnloop_unicause(card, 1, 3));
        isdnloop_fake(card, buf, ch + 1);
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
 }
 
 /*
@@ -622,8 +617,7 @@ isdnloop_start_ctimer(isdnloop_card * card, int ch)
 {
        unsigned long flags;
 
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
        init_timer(&card->c_timer[ch]);
        card->c_timer[ch].expires = jiffies + ISDNLOOP_TIMER_ALERTWAIT;
        if (ch)
@@ -632,7 +626,7 @@ isdnloop_start_ctimer(isdnloop_card * card, int ch)
                card->c_timer[ch].function = isdnloop_atimeout0;
        card->c_timer[ch].data = (unsigned long) card;
        add_timer(&card->c_timer[ch]);
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
 }
 
 /*
@@ -647,10 +641,9 @@ isdnloop_kill_ctimer(isdnloop_card * card, int ch)
 {
        unsigned long flags;
 
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
        del_timer(&card->c_timer[ch]);
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
 }
 
 static u_char si2bit[] =
@@ -706,13 +699,12 @@ isdnloop_try_call(isdnloop_card * card, char *p, int lch, isdn_ctrl * cmd)
                                        }
                        }
                        if (num_match) {
-                               save_flags(flags);
-                               cli();
+                               spin_lock_irqsave(&card->isdnloop_lock, flags);
                                /* channel idle? */
                                if (!(cc->rcard[ch])) {
                                        /* Check SI */
                                        if (!(si2bit[cmd->parm.setup.si1] & cc->sil[ch])) {
-                                               restore_flags(flags);
+                                               spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                                                return 3;
                                        }
                                        /* ch is idle, si and number matches */
@@ -720,10 +712,10 @@ isdnloop_try_call(isdnloop_card * card, char *p, int lch, isdn_ctrl * cmd)
                                        cc->rch[ch] = lch;
                                        card->rcard[lch] = cc;
                                        card->rch[lch] = ch;
-                                       restore_flags(flags);
+                                       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                                        return 0;
                                } else {
-                                       restore_flags(flags);
+                                       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                                        /* num matches, but busy */
                                        if (ch == 1)
                                                return 1;
@@ -1027,8 +1019,7 @@ isdnloop_stopcard(isdnloop_card * card)
        unsigned long flags;
        isdn_ctrl cmd;
 
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
        if (card->flags & ISDNLOOP_FLAGS_RUNNING) {
                card->flags &= ~ISDNLOOP_FLAGS_RUNNING;
                del_timer(&card->st_timer);
@@ -1039,7 +1030,7 @@ isdnloop_stopcard(isdnloop_card * card)
                cmd.driver = card->myid;
                card->interface.statcallb(&cmd);
        }
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
 }
 
 /*
@@ -1078,18 +1069,17 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
                return -EBUSY;
        if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef)))
                return -EFAULT;
-       save_flags(flags);
-       cli();
+       spin_lock_irqsave(&card->isdnloop_lock, flags);
        switch (sdef.ptype) {
                case ISDN_PTYPE_EURO:
                        if (isdnloop_fake(card, "DRV1.23EC-Q.931-CAPI-CNS-BASIS-20.02.96",
                                          -1)) {
-                               restore_flags(flags);
+                               spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                                return -ENOMEM;
                        }
                        card->sil[0] = card->sil[1] = 4;
                        if (isdnloop_fake(card, "TEI OK", 0)) {
-                               restore_flags(flags);
+                               spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                                return -ENOMEM;
                        }
                        for (i = 0; i < 3; i++)
@@ -1098,12 +1088,12 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
                case ISDN_PTYPE_1TR6:
                        if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
                                          -1)) {
-                               restore_flags(flags);
+                               spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                                return -ENOMEM;
                        }
                        card->sil[0] = card->sil[1] = 4;
                        if (isdnloop_fake(card, "TEI OK", 0)) {
-                               restore_flags(flags);
+                               spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                                return -ENOMEM;
                        }
                        strcpy(card->s0num[0], sdef.num[0]);
@@ -1111,7 +1101,7 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
                        card->s0num[2][0] = '\0';
                        break;
                default:
-                       restore_flags(flags);
+                       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
                        printk(KERN_WARNING "isdnloop: Illegal D-channel protocol %d\n",
                               sdef.ptype);
                        return -EINVAL;
@@ -1122,7 +1112,7 @@ isdnloop_start(isdnloop_card * card, isdnloop_sdef * sdefp)
        card->st_timer.data = (unsigned long) card;
        add_timer(&card->st_timer);
        card->flags |= ISDNLOOP_FLAGS_RUNNING;
-       restore_flags(flags);
+       spin_unlock_irqrestore(&card->isdnloop_lock, flags);
        return 0;
 }
 
@@ -1472,6 +1462,7 @@ isdnloop_initcard(char *id)
                skb_queue_head_init(&card->bqueue[i]);
        }
        skb_queue_head_init(&card->dqueue);
+       spin_lock_init(&card->isdnloop_lock);
        card->next = cards;
        cards = card;
        if (!register_isdn(&card->interface)) {
index d699fe53e1c37a4ae906323aa415e89f420cf4ec..0d458a86f5299b086e6b5792b9814b35915e2c4f 100644 (file)
@@ -94,6 +94,7 @@ typedef struct isdnloop_card {
        struct sk_buff_head
         bqueue[ISDNLOOP_BCH];  /* B-Channel queues                 */
        struct sk_buff_head dqueue;     /* D-Channel queue                  */
+       spinlock_t isdnloop_lock;
 } isdnloop_card;
 
 /*
index 94f21486bb24d667b4c22b29e2c507e30262919b..6ead5e1508b705fd682c28c2a57c9a581ab5e611 100644 (file)
@@ -725,23 +725,27 @@ static int pcbit_stat(u_char __user *buf, int len, int driver, int channel)
 
        if (stat_st < stat_end)
        {
-               copy_to_user(buf, statbuf + stat_st, len);
+               if (copy_to_user(buf, statbuf + stat_st, len))
+                       return -EFAULT;
                stat_st += len;    
        }
        else
        {
                if (len > STATBUF_LEN - stat_st)
                {
-                       copy_to_user(buf, statbuf + stat_st, 
-                                      STATBUF_LEN - stat_st);
-                       copy_to_user(buf, statbuf, 
-                                      len - (STATBUF_LEN - stat_st));
+                       if (copy_to_user(buf, statbuf + stat_st,
+                                      STATBUF_LEN - stat_st))
+                               return -EFAULT;
+                       if (copy_to_user(buf, statbuf,
+                                      len - (STATBUF_LEN - stat_st)))
+                               return -EFAULT;
 
                        stat_st = len - (STATBUF_LEN - stat_st);
                }
                else
                {
-                       copy_to_user(buf, statbuf + stat_st, len);
+                       if (copy_to_user(buf, statbuf + stat_st, len))
+                               return -EFAULT;
 
                        stat_st += len;
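
pcbit_stat() reads out of a STATBUF_LEN-byte circular buffer, so a request that crosses the end of statbuf is served in two copies, each of which is now checked. A from-scratch wrap-around copy would also advance the destination for the second half; the driver has always written both halves to `buf`, a quirk the checked version above preserves. The textbook shape:

	size_t first = STATBUF_LEN - stat_st;	/* bytes up to the wrap point */

	if (copy_to_user(buf, statbuf + stat_st, first))
		return -EFAULT;
	if (copy_to_user(buf + first, statbuf, len - first))
		return -EFAULT;			/* note the buf + first */
	stat_st = len - first;
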
                        
index 13e7d219d1c733fc0c5eb93371d3407d6de6cc0e..937fd21203816b7521aa92b660672b424e8f6dc3 100644 (file)
@@ -311,6 +311,7 @@ pcbit_deliver(void *data)
                dev->read_queue = frame->next;
                spin_unlock_irqrestore(&dev->lock, flags);
 
+               msg = 0;
                SET_MSG_CPU(msg, 0);
                SET_MSG_PROC(msg, 0);
                SET_MSG_CMD(msg, frame->skb->data[2]);
index 222ca7c08baa7f442dce8c85fa5c4f8f1303f53b..06c9872e8c6a5c5b503117d86fdd05dbd3286e65 100644 (file)
@@ -98,13 +98,14 @@ static int __init sc_init(void)
                         * Confirm the I/O Address with a test
                         */
                        if(io[b] == 0) {
-                               pr_debug("I/O Address 0x%x is in use.\n");
+                               pr_debug("I/O Address invalid.\n");
                                continue;
                        }
 
                        outb(0x18, io[b] + 0x400 * EXP_PAGE0);
                        if(inb(io[b] + 0x400 * EXP_PAGE0) != 0x18) {
-                               pr_debug("I/O Base 0x%x fails test\n");
+                               pr_debug("I/O Base 0x%x fails test\n",
+                                        io[b] + 0x400 * EXP_PAGE0);
                                continue;
                        }
                }
@@ -158,8 +159,8 @@ static int __init sc_init(void)
                        outb(0xFF, io[b] + RESET_OFFSET);
                        msleep_interruptible(10000);
                }
-               pr_debug("RAM Base for board %d is 0x%x, %s probe\n", b, ram[b],
-                       ram[b] == 0 ? "will" : "won't");
+               pr_debug("RAM Base for board %d is 0x%lx, %s probe\n", b,
+                       ram[b], ram[b] == 0 ? "will" : "won't");
 
                if(ram[b]) {
                        /*
@@ -168,7 +169,7 @@ static int __init sc_init(void)
                         * board model
                         */
                        if(request_region(ram[b], SRAM_PAGESIZE, "sc test")) {
-                               pr_debug("request_region for RAM base 0x%x succeeded\n", ram[b]);
+                               pr_debug("request_region for RAM base 0x%lx succeeded\n", ram[b]);
                                model = identify_board(ram[b], io[b]);
                                release_region(ram[b], SRAM_PAGESIZE);
                        }
@@ -204,7 +205,7 @@ static int __init sc_init(void)
                         * Nope, there was no place in RAM for the
                         * board, or it couldn't be identified
                         */
-                        pr_debug("Failed to find an adapter at 0x%x\n", ram[b]);
+                        pr_debug("Failed to find an adapter at 0x%lx\n", ram[b]);
                         continue;
                }
 
@@ -451,7 +452,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
        HWConfig_pl hwci;
        int x;
 
-       pr_debug("Attempting to identify adapter @ 0x%x io 0x%x\n",
+       pr_debug("Attempting to identify adapter @ 0x%lx io 0x%x\n",
                rambase, iobase);
 
        /*
@@ -490,7 +491,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
        outb(PRI_BASEPG_VAL, pgport);
        msleep_interruptible(1000);
        sig = readl(rambase + SIG_OFFSET);
-       pr_debug("Looking for a signature, got 0x%x\n", sig);
+       pr_debug("Looking for a signature, got 0x%lx\n", sig);
        if(sig == SIGNATURE)
                return PRI_BOARD;
 
@@ -500,7 +501,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
        outb(BRI_BASEPG_VAL, pgport);
        msleep_interruptible(1000);
        sig = readl(rambase + SIG_OFFSET);
-       pr_debug("Looking for a signature, got 0x%x\n", sig);
+       pr_debug("Looking for a signature, got 0x%lx\n", sig);
        if(sig == SIGNATURE)
                return BRI_BOARD;
 
@@ -510,7 +511,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
         * Try to spot a card
         */
        sig = readl(rambase + SIG_OFFSET);
-       pr_debug("Looking for a signature, got 0x%x\n", sig);
+       pr_debug("Looking for a signature, got 0x%lx\n", sig);
        if(sig != SIGNATURE)
                return -1;
 
@@ -540,7 +541,7 @@ static int identify_board(unsigned long rambase, unsigned int iobase)
        memcpy_fromio(&rcvmsg, &(dpm->rsp_queue[dpm->rsp_tail]), MSG_LEN);
        pr_debug("Got HWConfig response, status = 0x%x\n", rcvmsg.rsp_status);
        memcpy(&hwci, &(rcvmsg.msg_data.HWCresponse), sizeof(HWConfig_pl));
-       pr_debug("Hardware Config: Interface: %s, RAM Size: %d, Serial: %s\n"
+       pr_debug("Hardware Config: Interface: %s, RAM Size: %ld, Serial: %s\n"
                 "                 Part: %s, Rev: %s\n",
                 hwci.st_u_sense ? "S/T" : "U", hwci.ram_size,
                 hwci.serial_no, hwci.part_no, hwci.rev_no);
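
The sc driver churn is printk-format hygiene: rambase, the buffer offsets and the signature reads are held in unsigned long here, so they need %lx/%ld; passing a long where %x expects an int is undefined on 64-bit targets and triggers compiler format warnings, and the old "I/O Address 0x%x is in use." string had no argument at all. The rule in miniature:

	unsigned long rambase = 0xd0000;	/* illustrative value */

	pr_debug("adapter at 0x%lx\n", rambase);		/* %lx matches unsigned long */
	pr_debug("adapter at 0x%x\n", (unsigned int)rambase);	/* or cast to match %x */
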
index f50defc38ae5b47baac92c6117b39d9a3bab1b04..1e04676b016b212b07d0397b5493c04bb1058c39 100644 (file)
@@ -44,7 +44,7 @@ int sndpkt(int devId, int channel, struct sk_buff *data)
                return -ENODEV;
        }
 
-       pr_debug("%s: sndpkt: frst = 0x%x nxt = %d  f = %d n = %d\n",
+       pr_debug("%s: sndpkt: frst = 0x%lx nxt = %d  f = %d n = %d\n",
                sc_adapter[card]->devicename,
                sc_adapter[card]->channel[channel].first_sendbuf,
                sc_adapter[card]->channel[channel].next_sendbuf,
@@ -66,7 +66,7 @@ int sndpkt(int devId, int channel, struct sk_buff *data)
        ReqLnkWrite.buff_offset = sc_adapter[card]->channel[channel].next_sendbuf *
                BUFFER_SIZE + sc_adapter[card]->channel[channel].first_sendbuf;
        ReqLnkWrite.msg_len = data->len; /* sk_buff size */
-       pr_debug("%s: writing %d bytes to buffer offset 0x%x\n",
+       pr_debug("%s: writing %d bytes to buffer offset 0x%lx\n",
                        sc_adapter[card]->devicename,
                        ReqLnkWrite.msg_len, ReqLnkWrite.buff_offset);
        memcpy_toshmem(card, (char *)ReqLnkWrite.buff_offset, data->data, ReqLnkWrite.msg_len);
@@ -74,7 +74,7 @@ int sndpkt(int devId, int channel, struct sk_buff *data)
        /*
         * sendmessage
         */
-       pr_debug("%s: sndpkt size=%d, buf_offset=0x%x buf_indx=%d\n",
+       pr_debug("%s: sndpkt size=%d, buf_offset=0x%lx buf_indx=%d\n",
                sc_adapter[card]->devicename,
                ReqLnkWrite.msg_len, ReqLnkWrite.buff_offset,
                sc_adapter[card]->channel[channel].next_sendbuf);
@@ -124,7 +124,7 @@ void rcvpkt(int card, RspMessage *rcvmsg)
                        return;
                }
                skb_put(skb, rcvmsg->msg_data.response.msg_len);
-               pr_debug("%s: getting data from offset: 0x%x\n",
+               pr_debug("%s: getting data from offset: 0x%lx\n",
                        sc_adapter[card]->devicename,
                        rcvmsg->msg_data.response.buff_offset);
                memcpy_fromshmem(card,
@@ -143,7 +143,7 @@ void rcvpkt(int card, RspMessage *rcvmsg)
 /*             memset_shmem(card, rcvmsg->msg_data.response.buff_offset, 0, BUFFER_SIZE); */
                newll.buff_offset = rcvmsg->msg_data.response.buff_offset;
                newll.msg_len = BUFFER_SIZE;
-               pr_debug("%s: recycled buffer at offset 0x%x size %d\n",
+               pr_debug("%s: recycled buffer at offset 0x%lx size %d\n",
                        sc_adapter[card]->devicename,
                        newll.buff_offset, newll.msg_len);
                sendmessage(card, CEPID, ceReqTypeLnk, ceReqClass1, ceReqLnkRead,
@@ -186,7 +186,7 @@ int setup_buffers(int card, int c)
        sc_adapter[card]->channel[c-1].num_sendbufs = nBuffers / 2;
        sc_adapter[card]->channel[c-1].free_sendbufs = nBuffers / 2;
        sc_adapter[card]->channel[c-1].next_sendbuf = 0;
-       pr_debug("%s: send buffer setup complete: first=0x%x n=%d f=%d, nxt=%d\n",
+       pr_debug("%s: send buffer setup complete: first=0x%lx n=%d f=%d, nxt=%d\n",
                                sc_adapter[card]->devicename,
                                sc_adapter[card]->channel[c-1].first_sendbuf,
                                sc_adapter[card]->channel[c-1].num_sendbufs,
@@ -203,7 +203,7 @@ int setup_buffers(int card, int c)
                        ((sc_adapter[card]->channel[c-1].first_sendbuf +
                        (nBuffers / 2) * buffer_size) + (buffer_size * i));
                RcvBuffOffset.msg_len = buffer_size;
-               pr_debug("%s: adding RcvBuffer #%d offset=0x%x sz=%d bufsz:%d\n",
+               pr_debug("%s: adding RcvBuffer #%d offset=0x%lx sz=%d bufsz:%d\n",
                                sc_adapter[card]->devicename,
                                i + 1, RcvBuffOffset.buff_offset, 
                                RcvBuffOffset.msg_len,buffer_size);
index 24854826ca4599842fd7165f4de12f548f1f9b45..6f58862992dbf9079fe978c2f0ccda31fcfd77e1 100644 (file)
@@ -61,7 +61,7 @@ void memcpy_toshmem(int card, void *dest, const void *src, size_t n)
        spin_unlock_irqrestore(&sc_adapter[card]->lock, flags);
        pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename,
                ((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
-       pr_debug("%s: copying %d bytes from %#x to %#x\n",
+       pr_debug("%s: copying %d bytes from %#lx to %#lx\n",
                sc_adapter[card]->devicename, n,
                (unsigned long) src,
                sc_adapter[card]->rambase + ((unsigned long) dest %0x4000));
index aecbbe2e89a92673fb600e0d0500194cde5dcff9..3c1711210e38a375f7ec1064c6bd59aeb7d74206 100644 (file)
@@ -91,6 +91,8 @@ EXPORT_SYMBOL_GPL(led_classdev_resume);
  */
 int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 {
+       int rc;
+
        led_cdev->class_dev = class_device_create(leds_class, NULL, 0,
                                                parent, "%s", led_cdev->name);
        if (unlikely(IS_ERR(led_cdev->class_dev)))
@@ -99,8 +101,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
        class_set_devdata(led_cdev->class_dev, led_cdev);
 
        /* register the attributes */
-       class_device_create_file(led_cdev->class_dev,
-                               &class_device_attr_brightness);
+       rc = class_device_create_file(led_cdev->class_dev,
+                                     &class_device_attr_brightness);
+       if (rc)
+               goto err_out;
 
        /* add to the list of leds */
        write_lock(&leds_list_lock);
@@ -110,16 +114,28 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 #ifdef CONFIG_LEDS_TRIGGERS
        rwlock_init(&led_cdev->trigger_lock);
 
-       led_trigger_set_default(led_cdev);
+       rc = class_device_create_file(led_cdev->class_dev,
+                                     &class_device_attr_trigger);
+       if (rc)
+               goto err_out_led_list;
 
-       class_device_create_file(led_cdev->class_dev,
-                               &class_device_attr_trigger);
+       led_trigger_set_default(led_cdev);
 #endif
 
        printk(KERN_INFO "Registered led device: %s\n",
                        led_cdev->class_dev->class_id);
 
        return 0;
+
+#ifdef CONFIG_LEDS_TRIGGERS
+err_out_led_list:
+       class_device_remove_file(led_cdev->class_dev,
+                               &class_device_attr_brightness);
+       list_del(&led_cdev->node);
+#endif
+err_out:
+       class_device_unregister(led_cdev->class_dev);
+       return rc;
 }
 EXPORT_SYMBOL_GPL(led_classdev_register);
 
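The hunk above turns fire-and-forget class_device_create_file() calls into
checked ones that unwind in reverse order of setup. A minimal sketch of the
same goto-unwind pattern, with hypothetical attribute names that are not part
of this patch:

	static int example_create_files(struct class_device *cdev)
	{
		int rc;

		rc = class_device_create_file(cdev, &class_device_attr_foo);
		if (rc)
			return rc;	/* nothing registered yet */

		rc = class_device_create_file(cdev, &class_device_attr_bar);
		if (rc)
			goto err_foo;	/* undo only what succeeded */

		return 0;

	err_foo:
		class_device_remove_file(cdev, &class_device_attr_foo);
		return rc;
	}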
index 179c2876b5416cf871dbb7c3c3ce08ecebe8b137..29a8818a32ec23e71fda137f1e634e46de538559 100644 (file)
@@ -123,6 +123,7 @@ static CLASS_DEVICE_ATTR(delay_off, 0644, led_delay_off_show,
 static void timer_trig_activate(struct led_classdev *led_cdev)
 {
        struct timer_trig_data *timer_data;
+       int rc;
 
        timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
        if (!timer_data)
@@ -134,10 +135,21 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
        timer_data->timer.function = led_timer_function;
        timer_data->timer.data = (unsigned long) led_cdev;
 
-       class_device_create_file(led_cdev->class_dev,
+       rc = class_device_create_file(led_cdev->class_dev,
                                &class_device_attr_delay_on);
-       class_device_create_file(led_cdev->class_dev,
+       if (rc) goto err_out;
+       rc = class_device_create_file(led_cdev->class_dev,
                                &class_device_attr_delay_off);
+       if (rc) goto err_out_delayon;
+
+       return;
+
+err_out_delayon:
+       class_device_remove_file(led_cdev->class_dev,
+                               &class_device_attr_delay_on);
+err_out:
+       led_cdev->trigger_data = NULL;
+       kfree(timer_data);
 }
 
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
index 09baa43b259975f05c3a28fe0e5f17a551418537..da862e4632dd0535b1c6047c3be5377b8f1bf1f8 100644 (file)
@@ -100,6 +100,7 @@ static DEVICE_ATTR(pos, S_IRUGO, mca_show_pos, NULL);
 int __init mca_register_device(int bus, struct mca_device *mca_dev)
 {
        struct mca_bus *mca_bus = mca_root_busses[bus];
+       int rc;
 
        mca_dev->dev.parent = &mca_bus->dev;
        mca_dev->dev.bus = &mca_bus_type;
@@ -108,13 +109,23 @@ int __init mca_register_device(int bus, struct mca_device *mca_dev)
        mca_dev->dev.dma_mask = &mca_dev->dma_mask;
        mca_dev->dev.coherent_dma_mask = mca_dev->dma_mask;
 
-       if (device_register(&mca_dev->dev))
-               return 0;
+       rc = device_register(&mca_dev->dev);
+       if (rc)
+               goto err_out;
 
-       device_create_file(&mca_dev->dev, &dev_attr_id);
-       device_create_file(&mca_dev->dev, &dev_attr_pos);
+       rc = device_create_file(&mca_dev->dev, &dev_attr_id);
+       if (rc) goto err_out_devreg;
+       rc = device_create_file(&mca_dev->dev, &dev_attr_pos);
+       if (rc) goto err_out_id;
 
        return 1;
+
+err_out_id:
+       device_remove_file(&mca_dev->dev, &dev_attr_id);
+err_out_devreg:
+       device_unregister(&mca_dev->dev);
+err_out:
+       return 0;
 }
 
 /* */
@@ -130,13 +141,16 @@ struct mca_bus * __devinit mca_attach_bus(int bus)
                return NULL;
        }
 
-       mca_bus = kmalloc(sizeof(struct mca_bus), GFP_KERNEL);
+       mca_bus = kzalloc(sizeof(struct mca_bus), GFP_KERNEL);
        if (!mca_bus)
                return NULL;
-       memset(mca_bus, 0, sizeof(struct mca_bus));
+
        sprintf(mca_bus->dev.bus_id,"mca%d",bus);
        sprintf(mca_bus->name,"Host %s MCA Bridge", bus ? "Secondary" : "Primary");
-       device_register(&mca_bus->dev);
+       if (device_register(&mca_bus->dev)) {
+               kfree(mca_bus);
+               return NULL;
+       }
 
        mca_root_busses[bus] = mca_bus;
 
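kzalloc() is kmalloc() plus guaranteed zeroing of the returned buffer, which
is why the separate memset() above could be dropped. A before/after sketch
with a hypothetical structure:

	struct example *p;

	/* before: two steps, and the memset is easy to forget */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p)
		memset(p, 0, sizeof(*p));

	/* after: one call that returns zeroed memory (or NULL) */
	p = kzalloc(sizeof(*p), GFP_KERNEL);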
index 8e67634e79a0d1f16d033e9e7ecdf3bcdd416f56..d47d38ac71b16f31f530d1bfd9656eb0e288f401 100644 (file)
@@ -1413,7 +1413,7 @@ int bitmap_create(mddev_t *mddev)
        int err;
        sector_t start;
 
-       BUG_ON(sizeof(bitmap_super_t) != 256);
+       BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
 
        if (!file && !mddev->bitmap_offset) /* bitmap disabled, nothing to do */
                return 0;
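BUG_ON() only trips at runtime, and only if bitmap_create() actually runs;
BUILD_BUG_ON() promotes the same size invariant to a compile-time failure.
A minimal sketch under an assumed 256-byte on-disk layout (hypothetical
struct, not the md code):

	struct on_disk_super {
		__le64 magic;
		u8     pad[248];	/* 8 + 248 == 256 */
	};

	static int example_create(void)
	{
		/* expands to a negative-sized array type when the
		 * condition is true, so compilation fails instead of
		 * a late BUG() in the field */
		BUILD_BUG_ON(sizeof(struct on_disk_super) != 256);
		return 0;
	}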
index 57fa64f93e5f1815ce57c398d8f1ae9cc64d32b1..f7f19088f3be4f05aada21cd33873cda2d91d56b 100644 (file)
@@ -4912,6 +4912,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
 }
 
 static struct file_operations md_seq_fops = {
+       .owner          = THIS_MODULE,
        .open           = md_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
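Setting .owner lets the VFS take a reference on the module for as long as the
file is open, so the module cannot be unloaded while a reader sits in
/proc/mdstat. A sketch of the same idiom for a hypothetical seq_file-backed
node (example_seq_open is assumed, not from this patch):

	static struct file_operations example_seq_fops = {
		.owner		= THIS_MODULE,	/* pin module while open */
		.open		= example_seq_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release,
	};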
index fb6c4cc8477db818d04fb061f0893584b428690e..14e69a736edae5f50bf3207b9c1b0a205199faff 100644 (file)
@@ -665,6 +665,10 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
        case BTTV_BOARD_TWINHAN_DST:
                /*      DST is not a frontend driver !!!                */
                state = (struct dst_state *) kmalloc(sizeof (struct dst_state), GFP_KERNEL);
+               if (!state) {
+                       printk(KERN_ERR "dvb_bt8xx: No memory\n");
+                       break;
+               }
                /*      Setup the Card                                  */
                state->config = &dst_config;
                state->i2c = card->i2c_adapter;
index e46eae3b9be2624690746413bbb2c74601e2af54..1990eda10c469a52471495d8c865d7fa6d908985 100644 (file)
@@ -19,6 +19,6 @@ config DVB_CORE_ATTACH
          allow the card drivers to only load the frontend modules
          they require. This saves several KBytes of memory.
 
-         Note: You will need moudule-init-tools v3.2 or later for this feature.
+         Note: You will need module-init-tools v3.2 or later for this feature.
 
          If unsure say Y.
index fd3a9902f98d472662f69ee96690c5d11d673814..5143e426d283cc539043e0234d108f8eb7d7915c 100644 (file)
@@ -169,7 +169,7 @@ EXPORT_SYMBOL(dibusb_read_eeprom_byte);
 // Config Adjacent channels  Perf -cal22
 static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
        .band_caps = BAND_VHF | BAND_UHF,
-       .setup     = (0 << 15) | (0 << 14) | (1 << 13) | (1 << 12) | (29 << 0),
+       .setup     = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
 
        .agc1_max = 48497,
        .agc1_min = 23593,
@@ -196,10 +196,14 @@ static struct dib3000mc_config stk3000p_dib3000p_config = {
        .ln_adc_level = 0x1cc7,
 
        .output_mpeg2_in_188_bytes = 1,
+
+       .agc_command1 = 1,
+       .agc_command2 = 1,
 };
 
 static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
-       .setup    = (0 << 15) | (0 << 14) | (1 << 13) | (1 << 12) | (29 << 0),
+       .band_caps = BAND_VHF | BAND_UHF,
+       .setup     = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
 
        .agc1_max = 56361,
        .agc1_min = 22282,
@@ -226,6 +230,9 @@ static struct dib3000mc_config mod3000p_dib3000p_config = {
        .ln_adc_level = 0x1cc7,
 
        .output_mpeg2_in_188_bytes = 1,
+
+       .agc_command1 = 1,
+       .agc_command2 = 1,
 };
 
 int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
index 5153fb943da1fce0e429648acb495d644e2cba7c..b607810327426f17bdc7a961974971b815da5253 100644 (file)
@@ -99,7 +99,9 @@
 struct dibusb_state {
        struct dib_fe_xfer_ops ops;
        int mt2060_present;
+};
 
+struct dibusb_device_state {
        /* for RC5 remote control */
        int old_toggle;
        int last_repeat_count;
index a9219bf69b8927faf0da389c1dddddc06742ef7e..a58874c790b20505d7db03213995b8bb14b66bcc 100644 (file)
@@ -75,7 +75,7 @@ static int nova_t_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
        u8 key[5],cmd[2] = { DIBUSB_REQ_POLL_REMOTE, 0x35 }, data,toggle,custom;
        u16 raw;
        int i;
-       struct dibusb_state *st = d->priv;
+       struct dibusb_device_state *st = d->priv;
 
        dvb_usb_generic_rw(d,cmd,2,key,5,0);
 
@@ -184,6 +184,7 @@ static struct dvb_usb_device_properties nova_t_properties = {
                        .size_of_priv     = sizeof(struct dibusb_state),
                }
        },
+       .size_of_priv     = sizeof(struct dibusb_device_state),
 
        .power_ctrl       = dibusb2_0_power_ctrl,
        .read_mac_address = nova_t_read_mac_address,
index ccc813b525d6d084217bc952a7bffe3bc2d63302..3561a777568c8cd3cee259c2db59c559744a8332 100644 (file)
@@ -345,7 +345,7 @@ static int dib3000mc_init(struct dvb_frontend *demod)
 
        /* agc */
        dib3000mc_write_word(state, 36, state->cfg->max_time);
-       dib3000mc_write_word(state, 37, agc->setup);
+       dib3000mc_write_word(state, 37, (state->cfg->agc_command1 << 13) | (state->cfg->agc_command2 << 12) | (0x1d << 0));
        dib3000mc_write_word(state, 38, state->cfg->pwm3_value);
        dib3000mc_write_word(state, 39, state->cfg->ln_adc_level);
 
index b198cd5b18436c65890bcef1a507f3a53dc0f6fe..0d6fdef775385e2269ae378d02afe0215ac4ee36 100644 (file)
@@ -28,6 +28,9 @@ struct dib3000mc_config {
        u16 max_time;
        u16 ln_adc_level;
 
+       u8 agc_command1 :1;
+       u8 agc_command2 :1;
+
        u8 mobile_mode;
 
        u8 output_mpeg2_in_188_bytes;
index e8061db1112361c4ff910b38b4a28cbd8eef6358..18457adee30bd08511394098fb2d55ae07e194ca 100644 (file)
@@ -35,7 +35,16 @@ struct tda10086_config
        u8 invert;
 };
 
+#if defined(CONFIG_DVB_TDA10086) || defined(CONFIG_DVB_TDA10086_MODULE)
 extern struct dvb_frontend* tda10086_attach(const struct tda10086_config* config,
                                            struct i2c_adapter* i2c);
+#else
+static inline struct dvb_frontend* tda10086_attach(const struct tda10086_config* config,
+                                                  struct i2c_adapter* i2c)
+{
+       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
+       return NULL;
+}
+#endif // CONFIG_DVB_TDA10086
 
 #endif // TDA10086_H
index 3307607632b0c2e4872a08e46d3f09cc74b7597a..83998c001196659deb242cf3161a76e77022bef0 100644 (file)
  * @param has_loopthrough Set to 1 if the card has a loopthrough RF connector.
  * @return FE pointer on success, NULL on failure.
  */
-extern struct dvb_frontend *tda826x_attach(struct dvb_frontend *fe, int addr, struct i2c_adapter *i2c, int has_loopthrough);
-
-#endif
+#if defined(CONFIG_DVB_TDA826X) || defined(CONFIG_DVB_TDA826X_MODULE)
+extern struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe, int addr,
+                                          struct i2c_adapter *i2c,
+                                          int has_loopthrough);
+#else
+static inline struct dvb_frontend* tda826x_attach(struct dvb_frontend *fe,
+                                                 int addr,
+                                                 struct i2c_adapter *i2c,
+                                                 int has_loopthrough)
+{
+       printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __FUNCTION__);
+       return NULL;
+}
+#endif // CONFIG_DVB_TDA826X
+
+#endif // __DVB_TDA826X_H__
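With these stubs (here and in the tda10086 hunk above), callers may invoke
the attach function unconditionally: when the tuner/demod driver is compiled
out, the inline version returns NULL and the caller's existing NULL check
covers both "driver disabled" and "hardware absent". A hedged sketch of the
calling side (config and adapter names are hypothetical); under
CONFIG_DVB_CORE_ATTACH the call is typically wrapped in dvb_attach() so
module reference counting is handled as well:

	struct dvb_frontend *fe;

	fe = dvb_attach(tda10086_attach, &example_tda10086_config, i2c_adap);
	if (fe == NULL)
		return -ENODEV;	/* compiled out, or nothing on the bus */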
index afb734df6e0591ac461572c9d998f670a633aac5..fbe5b6168cc29c287416926ae42bbb26b587725c 100644 (file)
@@ -677,6 +677,8 @@ config VIDEO_M32R_AR_M64278
 menu "V4L USB devices"
        depends on USB && VIDEO_DEV
 
+source "drivers/media/video/pvrusb2/Kconfig"
+
 source "drivers/media/video/em28xx/Kconfig"
 
 source "drivers/media/video/usbvideo/Kconfig"
index 48014a254e15fdcf10b31c2ac255d5543083643d..f85f2084324fb9ec9f8189de75f0a6f74f41649d 100644 (file)
@@ -235,6 +235,7 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
                        0, 0, V4L2_SLICED_VPS, 0, 0,    /* 9 */
                        0, 0, 0, 0
                };
+               int is_pal = !(cx25840_get_v4lstd(client) & V4L2_STD_525_60);
                int i;
 
                fmt = arg;
@@ -246,13 +247,25 @@ int cx25840_vbi(struct i2c_client *client, unsigned int cmd, void *arg)
                if ((cx25840_read(client, 0x404) & 0x10) == 0)
                        break;
 
-               for (i = 7; i <= 23; i++) {
-                       u8 v = cx25840_read(client, 0x424 + i - 7);
+               if (is_pal) {
+                       for (i = 7; i <= 23; i++) {
+                               u8 v = cx25840_read(client, 0x424 + i - 7);
+
+                               svbi->service_lines[0][i] = lcr2vbi[v >> 4];
+                               svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
+                               svbi->service_set |=
+                                       svbi->service_lines[0][i] | svbi->service_lines[1][i];
+                       }
+               } else {
+                       for (i = 10; i <= 21; i++) {
+                               u8 v = cx25840_read(client, 0x424 + i - 10);
 
-                       svbi->service_lines[0][i] = lcr2vbi[v >> 4];
-                       svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
-                       svbi->service_set |=
-                                svbi->service_lines[0][i] | svbi->service_lines[1][i];
+                               svbi->service_lines[0][i] = lcr2vbi[v >> 4];
+                               svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
+                               svbi->service_set |=
+                                       svbi->service_lines[0][i] | svbi->service_lines[1][i];
+                       }
                }
                break;
        }
index af71d4225c763eefafa18907a9efd4c8c805a133..f764a57c56be53abdca042ad26211cdc80c7ec90 100644 (file)
@@ -1230,6 +1230,7 @@ struct cx88_board cx88_boards[] = {
                        .vmux   = 2,
                        .gpio0  = 0x84bf,
                }},
+               .mpeg           = CX88_MPEG_DVB,
        },
        [CX88_BOARD_NORWOOD_MICRO] = {
                .name           = "Norwood Micro TV Tuner",
@@ -1590,6 +1591,18 @@ struct cx88_subid cx88_subids[] = {
                .subvendor = 0x0070,
                .subdevice = 0x9000,
                .card      = CX88_BOARD_HAUPPAUGE_DVB_T1,
+       },{
+               .subvendor = 0x0070,
+               .subdevice = 0x1400,
+               .card      = CX88_BOARD_HAUPPAUGE_HVR3000,
+       },{
+               .subvendor = 0x0070,
+               .subdevice = 0x1401,
+               .card      = CX88_BOARD_HAUPPAUGE_HVR3000,
+       },{
+               .subvendor = 0x0070,
+               .subdevice = 0x1402,
+               .card      = CX88_BOARD_HAUPPAUGE_HVR3000,
        },
 };
 const unsigned int cx88_idcount = ARRAY_SIZE(cx88_subids);
@@ -1633,7 +1646,15 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
        /* Make sure we support the board model */
        switch (tv.model)
        {
+       case 14009: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in) */
+       case 14019: /* WinTV-HVR3000 (Retail, IR Blaster, b/panel video, 3.5mm audio in) */
+       case 14029: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - 880 bridge) */
+       case 14109: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - low profile) */
+       case 14129: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - 880 bridge - LP) */
+       case 14559: /* WinTV-HVR3000 (OEM, no IR, b/panel video, 3.5mm audio in) */
        case 14569: /* WinTV-HVR3000 (OEM, no IR, no back panel video) */
+       case 14659: /* WinTV-HVR3000 (OEM, no IR, b/panel video, RCA audio in - Low profile) */
+       case 14669: /* WinTV-HVR3000 (OEM, no IR, no b/panel video - Low profile) */
        case 28552: /* WinTV-PVR 'Roslyn' (No IR) */
        case 34519: /* WinTV-PCI-FM */
        case 90002: /* Nova-T-PCI (9002) */
index bd0c8797f26d6289898dcf1de0e5480a4b02c7b6..0ef13e7efa2ee2efc6841913338ed48c2f22fe29 100644 (file)
@@ -315,15 +315,22 @@ static struct cx22702_config hauppauge_novat_config = {
        .demod_address = 0x43,
        .output_mode   = CX22702_SERIAL_OUTPUT,
 };
+
 static struct cx22702_config hauppauge_hvr1100_config = {
        .demod_address = 0x63,
        .output_mode   = CX22702_SERIAL_OUTPUT,
 };
+
 static struct cx22702_config hauppauge_hvr1300_config = {
        .demod_address = 0x63,
        .output_mode   = CX22702_SERIAL_OUTPUT,
 };
 
+static struct cx22702_config hauppauge_hvr3000_config = {
+       .demod_address = 0x63,
+       .output_mode = CX22702_SERIAL_OUTPUT,
+};
+
 static int or51132_set_ts_param(struct dvb_frontend* fe,
                                int is_punctured)
 {
@@ -558,6 +565,16 @@ static int dvb_register(struct cx8802_dev *dev)
                                   &dvb_pll_fmd1216me);
                }
                break;
+       case CX88_BOARD_HAUPPAUGE_HVR3000:
+               dev->dvb.frontend = dvb_attach(cx22702_attach,
+                                              &hauppauge_hvr3000_config,
+                                              &dev->core->i2c_adap);
+               if (dev->dvb.frontend != NULL) {
+                       dvb_attach(dvb_pll_attach, dev->dvb.frontend, 0x61,
+                                  &dev->core->i2c_adap,
+                                  &dvb_pll_fmd1216me);
+               }
+               break;
        case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_PLUS:
                dev->dvb.frontend = dvb_attach(mt352_attach,
                                               &dvico_fusionhdtv,
index 83ebf7a3c054a9bfa44d384175bdba3d72acae48..ee48995a4ab5e92d350d119dfb4f4aa5eff66394 100644 (file)
@@ -196,6 +196,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
        case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
        case CX88_BOARD_HAUPPAUGE_HVR1100:
        case CX88_BOARD_HAUPPAUGE_HVR1300:
+       case CX88_BOARD_HAUPPAUGE_HVR3000:
                ir_codes = ir_codes_hauppauge_new;
                ir_type = IR_TYPE_RC5;
                ir->sampling = 1;
@@ -419,6 +420,7 @@ void cx88_ir_irq(struct cx88_core *core)
        case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
        case CX88_BOARD_HAUPPAUGE_HVR1100:
        case CX88_BOARD_HAUPPAUGE_HVR1300:
+       case CX88_BOARD_HAUPPAUGE_HVR3000:
                ircode = ir_decode_biphase(ir->samples, ir->scount, 5, 7);
                ir_dprintk("biphase decoded: %x\n", ircode);
                if ((ircode & 0xfffff000) != 0x3000)
index bc544cc7ccb80154f378b050eaf46c10077e6501..f786ab11d2cd9b1a48cb0aff6bd70f85c19abd2f 100644 (file)
@@ -973,16 +973,32 @@ static CLASS_DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR,
                         et61x251_show_i2c_val, et61x251_store_i2c_val);
 
 
-static void et61x251_create_sysfs(struct et61x251_device* cam)
+static int et61x251_create_sysfs(struct et61x251_device* cam)
 {
        struct video_device *v4ldev = cam->v4ldev;
+       int rc;
 
-       video_device_create_file(v4ldev, &class_device_attr_reg);
-       video_device_create_file(v4ldev, &class_device_attr_val);
+       rc = video_device_create_file(v4ldev, &class_device_attr_reg);
+       if (rc) goto err;
+       rc = video_device_create_file(v4ldev, &class_device_attr_val);
+       if (rc) goto err_reg;
        if (cam->sensor.sysfs_ops) {
-               video_device_create_file(v4ldev, &class_device_attr_i2c_reg);
-               video_device_create_file(v4ldev, &class_device_attr_i2c_val);
+               rc = video_device_create_file(v4ldev, &class_device_attr_i2c_reg);
+               if (rc) goto err_val;
+               rc = video_device_create_file(v4ldev, &class_device_attr_i2c_val);
+               if (rc) goto err_i2c_reg;
        }
+
+       return 0;
+
+err_i2c_reg:
+       video_device_remove_file(v4ldev, &class_device_attr_i2c_reg);
+err_val:
+       video_device_remove_file(v4ldev, &class_device_attr_val);
+err_reg:
+       video_device_remove_file(v4ldev, &class_device_attr_reg);
+err:
+       return rc;
 }
 #endif /* CONFIG_VIDEO_ADV_DEBUG */
 
@@ -2534,7 +2550,9 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
        dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
-       et61x251_create_sysfs(cam);
+       err = et61x251_create_sysfs(cam);
+       if (err)
+               goto fail2;
        DBG(2, "Optional device control through 'sysfs' interface ready");
 #endif
 
@@ -2544,6 +2562,13 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
 
        return 0;
 
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+fail2:
+       video_nr[dev_nr] = -1;
+       dev_nr = (dev_nr < ET61X251_MAX_DEVICES-1) ? dev_nr+1 : 0;
+       mutex_unlock(&cam->dev_mutex);
+       video_unregister_device(cam->v4ldev);
+#endif
 fail:
        if (cam) {
                kfree(cam->control_buffer);
index ce4886f1528dd322c3762f98608920c350d48f3d..b4db2cbb5a84246151e0d39ca589e7ceb50f915e 100644 (file)
@@ -5648,17 +5648,49 @@ static ssize_t show_exposure(struct class_device *cd, char *buf)
 }
 static CLASS_DEVICE_ATTR(exposure, S_IRUGO, show_exposure, NULL);
 
-static void ov_create_sysfs(struct video_device *vdev)
+static int ov_create_sysfs(struct video_device *vdev)
 {
-       video_device_create_file(vdev, &class_device_attr_custom_id);
-       video_device_create_file(vdev, &class_device_attr_model);
-       video_device_create_file(vdev, &class_device_attr_bridge);
-       video_device_create_file(vdev, &class_device_attr_sensor);
-       video_device_create_file(vdev, &class_device_attr_brightness);
-       video_device_create_file(vdev, &class_device_attr_saturation);
-       video_device_create_file(vdev, &class_device_attr_contrast);
-       video_device_create_file(vdev, &class_device_attr_hue);
-       video_device_create_file(vdev, &class_device_attr_exposure);
+       int rc;
+
+       rc = video_device_create_file(vdev, &class_device_attr_custom_id);
+       if (rc) goto err;
+       rc = video_device_create_file(vdev, &class_device_attr_model);
+       if (rc) goto err_id;
+       rc = video_device_create_file(vdev, &class_device_attr_bridge);
+       if (rc) goto err_model;
+       rc = video_device_create_file(vdev, &class_device_attr_sensor);
+       if (rc) goto err_bridge;
+       rc = video_device_create_file(vdev, &class_device_attr_brightness);
+       if (rc) goto err_sensor;
+       rc = video_device_create_file(vdev, &class_device_attr_saturation);
+       if (rc) goto err_bright;
+       rc = video_device_create_file(vdev, &class_device_attr_contrast);
+       if (rc) goto err_sat;
+       rc = video_device_create_file(vdev, &class_device_attr_hue);
+       if (rc) goto err_contrast;
+       rc = video_device_create_file(vdev, &class_device_attr_exposure);
+       if (rc) goto err_hue;
+
+       return 0;
+
+err_hue:
+       video_device_remove_file(vdev, &class_device_attr_hue);
+err_contrast:
+       video_device_remove_file(vdev, &class_device_attr_contrast);
+err_sat:
+       video_device_remove_file(vdev, &class_device_attr_saturation);
+err_bright:
+       video_device_remove_file(vdev, &class_device_attr_brightness);
+err_sensor:
+       video_device_remove_file(vdev, &class_device_attr_sensor);
+err_bridge:
+       video_device_remove_file(vdev, &class_device_attr_bridge);
+err_model:
+       video_device_remove_file(vdev, &class_device_attr_model);
+err_id:
+       video_device_remove_file(vdev, &class_device_attr_custom_id);
+err:
+       return rc;
 }
 
 /****************************************************************************
@@ -5817,7 +5849,11 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
             ov->vdev->minor);
 
        usb_set_intfdata(intf, ov);
-       ov_create_sysfs(ov->vdev);
+       if (ov_create_sysfs(ov->vdev)) {
+               err("ov_create_sysfs failed");
+               goto error;
+       }
+
        return 0;
 
 error:
index c77b85cf3d8056a9a1d67534c064da9b2740138f..46c1148308843a4d8ed3e0db02d6f20fd8f73c76 100644 (file)
@@ -1024,12 +1024,25 @@ static ssize_t show_snapshot_button_status(struct class_device *class_dev, char
 static CLASS_DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status,
                         NULL);
 
-static void pwc_create_sysfs_files(struct video_device *vdev)
+static int pwc_create_sysfs_files(struct video_device *vdev)
 {
        struct pwc_device *pdev = video_get_drvdata(vdev);
-       if (pdev->features & FEATURE_MOTOR_PANTILT)
-               video_device_create_file(vdev, &class_device_attr_pan_tilt);
-       video_device_create_file(vdev, &class_device_attr_button);
+       int rc;
+
+       rc = video_device_create_file(vdev, &class_device_attr_button);
+       if (rc)
+               goto err;
+       if (pdev->features & FEATURE_MOTOR_PANTILT) {
+               rc = video_device_create_file(vdev,&class_device_attr_pan_tilt);
+               if (rc) goto err_button;
+       }
+
+       return 0;
+
+err_button:
+       video_device_remove_file(vdev, &class_device_attr_button);
+err:
+       return rc;
 }
 
 static void pwc_remove_sysfs_files(struct video_device *vdev)
@@ -1408,7 +1421,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        struct usb_device *udev = interface_to_usbdev(intf);
        struct pwc_device *pdev = NULL;
        int vendor_id, product_id, type_id;
-       int i, hint;
+       int i, hint, rc;
        int features = 0;
        int video_nr = -1; /* default: use next available device */
        char serial_number[30], *name;
@@ -1709,9 +1722,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        i = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
        if (i < 0) {
                PWC_ERROR("Failed to register as video device (%d).\n", i);
-               video_device_release(pdev->vdev); /* Drip... drip... drip... */
-               kfree(pdev); /* Oops, no memory leaks please */
-               return -EIO;
+               rc = i;
+               goto err;
        }
        else {
                PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->minor & 0x3F);
@@ -1723,13 +1735,24 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
 
        PWC_DEBUG_PROBE("probe() function returning struct at 0x%p.\n", pdev);
        usb_set_intfdata (intf, pdev);
-       pwc_create_sysfs_files(pdev->vdev);
+       rc = pwc_create_sysfs_files(pdev->vdev);
+       if (rc)
+               goto err_unreg;
 
        /* Set the leds off */
        pwc_set_leds(pdev, 0, 0);
        pwc_camera_power(pdev, 0);
 
        return 0;
+
+err_unreg:
+       if (hint < MAX_DEV_HINTS)
+               device_hint[hint].pdev = NULL;
+       video_unregister_device(pdev->vdev);
+err:
+       video_device_release(pdev->vdev); /* Drip... drip... drip... */
+       kfree(pdev); /* Oops, no memory leaks please */
+       return rc;
 }
 
 /* The user janked out the cable... */
index 974179d4d3895b2dd3135b245d1e5158c4a8e213..c5719f7bd1acafbaee6f53c8553363c35f1b5bf2 100644 (file)
@@ -960,6 +960,8 @@ static void saa711x_set_v4lstd(struct i2c_client *client, v4l2_std_id std)
                        reg |= 0x10;
                } else if (std == V4L2_STD_NTSC_M_JP) {
                        reg |= 0x40;
+               } else if (std == V4L2_STD_SECAM) {
+                       reg |= 0x50;
                }
                saa711x_write(client, R_0E_CHROMA_CNTL_1, reg);
        } else {
index 203302f21827e5104e8b7d1b2485afbb369b76c2..830617ea81cc5ca0b6c68fce66fe45be112a1fc8 100644 (file)
@@ -2248,7 +2248,11 @@ static int radio_do_ioctl(struct inode *inode, struct file *file,
                t->type = V4L2_TUNER_RADIO;
 
                saa7134_i2c_call_clients(dev, VIDIOC_G_TUNER, t);
-
+               if (dev->input->amux == TV) {
+                       t->signal = 0xf800 - ((saa_readb(0x581) & 0x1f) << 11);
+                       t->rxsubchans = (saa_readb(0x529) & 0x08) ?
+                                       V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
+               }
                return 0;
        }
        case VIDIOC_S_TUNER:
index 3e0ff8a78468a0eb41ea31d4e8a5d30fbbdf3eda..a4702d3c2aca4f295faa28dff8ca5c9c37bf49e1 100644 (file)
@@ -1240,23 +1240,53 @@ static CLASS_DEVICE_ATTR(frame_header, S_IRUGO,
                         sn9c102_show_frame_header, NULL);
 
 
-static void sn9c102_create_sysfs(struct sn9c102_device* cam)
+static int sn9c102_create_sysfs(struct sn9c102_device* cam)
 {
        struct video_device *v4ldev = cam->v4ldev;
+       int rc;
+
+       rc = video_device_create_file(v4ldev, &class_device_attr_reg);
+       if (rc) goto err;
+       rc = video_device_create_file(v4ldev, &class_device_attr_val);
+       if (rc) goto err_reg;
+       rc = video_device_create_file(v4ldev, &class_device_attr_frame_header);
+       if (rc) goto err_val;
 
-       video_device_create_file(v4ldev, &class_device_attr_reg);
-       video_device_create_file(v4ldev, &class_device_attr_val);
-       video_device_create_file(v4ldev, &class_device_attr_frame_header);
-       if (cam->bridge == BRIDGE_SN9C101 || cam->bridge == BRIDGE_SN9C102)
-               video_device_create_file(v4ldev, &class_device_attr_green);
-       else if (cam->bridge == BRIDGE_SN9C103) {
-               video_device_create_file(v4ldev, &class_device_attr_blue);
-               video_device_create_file(v4ldev, &class_device_attr_red);
-       }
        if (cam->sensor.sysfs_ops) {
-               video_device_create_file(v4ldev, &class_device_attr_i2c_reg);
-               video_device_create_file(v4ldev, &class_device_attr_i2c_val);
+               rc = video_device_create_file(v4ldev, &class_device_attr_i2c_reg);
+               if (rc) goto err_frhead;
+               rc = video_device_create_file(v4ldev, &class_device_attr_i2c_val);
+               if (rc) goto err_i2c_reg;
+       }
+
+       if (cam->bridge == BRIDGE_SN9C101 || cam->bridge == BRIDGE_SN9C102) {
+               rc = video_device_create_file(v4ldev, &class_device_attr_green);
+               if (rc) goto err_i2c_val;
+       } else if (cam->bridge == BRIDGE_SN9C103) {
+               rc = video_device_create_file(v4ldev, &class_device_attr_blue);
+               if (rc) goto err_i2c_val;
+               rc = video_device_create_file(v4ldev, &class_device_attr_red);
+               if (rc) goto err_blue;
        }
+
+       return 0;
+
+err_blue:
+       video_device_remove_file(v4ldev, &class_device_attr_blue);
+err_i2c_val:
+       if (cam->sensor.sysfs_ops)
+               video_device_remove_file(v4ldev, &class_device_attr_i2c_val);
+err_i2c_reg:
+       if (cam->sensor.sysfs_ops)
+               video_device_remove_file(v4ldev, &class_device_attr_i2c_reg);
+err_frhead:
+       video_device_remove_file(v4ldev, &class_device_attr_frame_header);
+err_val:
+       video_device_remove_file(v4ldev, &class_device_attr_val);
+err_reg:
+       video_device_remove_file(v4ldev, &class_device_attr_reg);
+err:
+       return rc;
 }
 #endif /* CONFIG_VIDEO_ADV_DEBUG */
 
@@ -2809,10 +2839,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
                DBG(1, "V4L2 device registration failed");
                if (err == -ENFILE && video_nr[dev_nr] == -1)
                        DBG(1, "Free /dev/videoX node not found");
-               video_nr[dev_nr] = -1;
-               dev_nr = (dev_nr < SN9C102_MAX_DEVICES-1) ? dev_nr+1 : 0;
-               mutex_unlock(&cam->dev_mutex);
-               goto fail;
+               goto fail2;
        }
 
        DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor);
@@ -2823,7 +2850,9 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
        dev_nr = (dev_nr < SN9C102_MAX_DEVICES-1) ? dev_nr+1 : 0;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
-       sn9c102_create_sysfs(cam);
+       err = sn9c102_create_sysfs(cam);
+       if (err)
+               goto fail3;
        DBG(2, "Optional device control through 'sysfs' interface ready");
 #endif
 
@@ -2833,6 +2862,14 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
 
        return 0;
 
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+fail3:
+       video_unregister_device(cam->v4ldev);
+#endif
+fail2:
+       video_nr[dev_nr] = -1;
+       dev_nr = (dev_nr < SN9C102_MAX_DEVICES-1) ? dev_nr+1 : 0;
+       mutex_unlock(&cam->dev_mutex);
 fail:
        if (cam) {
                kfree(cam->control_buffer);
index 87e11300181dd26b708c4f73bd4b1f2ab8daba8f..6d1ef1e2e8efd9dbf8be48763ee7283b3cf42021 100644 (file)
@@ -516,16 +516,45 @@ stv680_file(frames_read, framecount, "%d\n");
 stv680_file(packets_dropped, dropped, "%d\n");
 stv680_file(decoding_errors, error, "%d\n");
 
-static void stv680_create_sysfs_files(struct video_device *vdev)
+static int stv680_create_sysfs_files(struct video_device *vdev)
 {
-       video_device_create_file(vdev, &class_device_attr_model);
-       video_device_create_file(vdev, &class_device_attr_in_use);
-       video_device_create_file(vdev, &class_device_attr_streaming);
-       video_device_create_file(vdev, &class_device_attr_palette);
-       video_device_create_file(vdev, &class_device_attr_frames_total);
-       video_device_create_file(vdev, &class_device_attr_frames_read);
-       video_device_create_file(vdev, &class_device_attr_packets_dropped);
-       video_device_create_file(vdev, &class_device_attr_decoding_errors);
+       int rc;
+
+       rc = video_device_create_file(vdev, &class_device_attr_model);
+       if (rc) goto err;
+       rc = video_device_create_file(vdev, &class_device_attr_in_use);
+       if (rc) goto err_model;
+       rc = video_device_create_file(vdev, &class_device_attr_streaming);
+       if (rc) goto err_inuse;
+       rc = video_device_create_file(vdev, &class_device_attr_palette);
+       if (rc) goto err_stream;
+       rc = video_device_create_file(vdev, &class_device_attr_frames_total);
+       if (rc) goto err_pal;
+       rc = video_device_create_file(vdev, &class_device_attr_frames_read);
+       if (rc) goto err_framtot;
+       rc = video_device_create_file(vdev, &class_device_attr_packets_dropped);
+       if (rc) goto err_framread;
+       rc = video_device_create_file(vdev, &class_device_attr_decoding_errors);
+       if (rc) goto err_dropped;
+
+       return 0;
+
+err_dropped:
+       video_device_remove_file(vdev, &class_device_attr_packets_dropped);
+err_framread:
+       video_device_remove_file(vdev, &class_device_attr_frames_read);
+err_framtot:
+       video_device_remove_file(vdev, &class_device_attr_frames_total);
+err_pal:
+       video_device_remove_file(vdev, &class_device_attr_palette);
+err_stream:
+       video_device_remove_file(vdev, &class_device_attr_streaming);
+err_inuse:
+       video_device_remove_file(vdev, &class_device_attr_in_use);
+err_model:
+       video_device_remove_file(vdev, &class_device_attr_model);
+err:
+       return rc;
 }
 
 static void stv680_remove_sysfs_files(struct video_device *vdev)
@@ -1418,9 +1447,13 @@ static int stv680_probe (struct usb_interface *intf, const struct usb_device_id
        PDEBUG (0, "STV(i): registered new video device: video%d", stv680->vdev->minor);
 
        usb_set_intfdata (intf, stv680);
-       stv680_create_sysfs_files(stv680->vdev);
+       retval = stv680_create_sysfs_files(stv680->vdev);
+       if (retval)
+               goto error_unreg;
        return 0;
 
+error_unreg:
+       video_unregister_device(stv680->vdev);
 error_vdev:
        video_device_release(stv680->vdev);
 error:
index 8fff642fad56fa8e2579c2741d96818ea6850d94..781682373b61f3992d36ac9e938a62451cd61608 100644 (file)
@@ -1046,7 +1046,6 @@ static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = {
                .type   = TUNER_PARAM_TYPE_NTSC,
                .ranges = tuner_samsung_tcpn_2121p30a_ntsc_ranges,
                .count  = ARRAY_SIZE(tuner_samsung_tcpn_2121p30a_ntsc_ranges),
-               .has_tda9887 = 1,
        },
 };
 
index 479a0675cf60bf8ddfc7b6a372077be03063b35a..d424a4129d69ba188772282ec9f29a95374218c9 100644 (file)
  */
 
 #define dbgarg(cmd, fmt, arg...) \
-               if (vfd->debug & V4L2_DEBUG_IOCTL_ARG)                  \
+               if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) {                \
                        printk (KERN_DEBUG "%s: ",  vfd->name);         \
                        v4l_printk_ioctl(cmd);                          \
-                       printk (KERN_DEBUG "%s: " fmt, vfd->name, ## arg);
+                       printk (KERN_DEBUG "%s: " fmt, vfd->name, ## arg); \
+               }
 
 #define dbgarg2(fmt, arg...) \
                if (vfd->debug & V4L2_DEBUG_IOCTL_ARG)                  \
@@ -1287,6 +1288,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
                        ret=vfd->vidioc_g_parm(file, fh, p);
                } else {
                        struct v4l2_standard s;
+                       int i;
 
                        if (!vfd->tvnormsize) {
                                printk (KERN_WARNING "%s: no TV norms defined!\n",
@@ -1297,8 +1299,14 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
                        if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
                                return -EINVAL;
 
-                       v4l2_video_std_construct(&s, vfd->tvnorms[vfd->current_norm].id,
-                                                vfd->tvnorms[vfd->current_norm].name);
+                       for (i = 0; i < vfd->tvnormsize; i++)
+                               if (vfd->tvnorms[i].id == vfd->current_norm)
+                                       break;
+                       if (i >= vfd->tvnormsize)
+                               return -EINVAL;
+
+                       v4l2_video_std_construct(&s, vfd->current_norm,
+                                                vfd->tvnorms[i].name);
 
                        memset(p,0,sizeof(*p));
 
index e7c01d560b6460f1dd6de8ab19a87ed651556782..3c8dc72dc8e971662b7267d545ee71e8e98a22b9 100644 (file)
@@ -272,7 +272,7 @@ static void gen_line(struct sg_to_addr to_addr[],int inipos,int pages,int wmax,
 
        /* Get first addr pointed to pixel position */
        oldpg=get_addr_pos(pos,pages,to_addr);
-       pg=pfn_to_page(to_addr[oldpg].sg->dma_address >> PAGE_SHIFT);
+       pg=pfn_to_page(sg_dma_address(to_addr[oldpg].sg) >> PAGE_SHIFT);
        basep = kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[oldpg].sg->offset;
 
        /* We will just duplicate the second pixel at the packet */
@@ -287,7 +287,7 @@ static void gen_line(struct sg_to_addr to_addr[],int inipos,int pages,int wmax,
                for (color=0;color<4;color++) {
                        pgpos=get_addr_pos(pos,pages,to_addr);
                        if (pgpos!=oldpg) {
-                               pg=pfn_to_page(to_addr[pgpos].sg->dma_address >> PAGE_SHIFT);
+                               pg=pfn_to_page(sg_dma_address(to_addr[pgpos].sg) >> PAGE_SHIFT);
                                kunmap_atomic(basep, KM_BOUNCE_READ);
                                basep= kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[pgpos].sg->offset;
                                oldpg=pgpos;
@@ -339,8 +339,8 @@ static void gen_line(struct sg_to_addr to_addr[],int inipos,int pages,int wmax,
                                for (color=0;color<4;color++) {
                                        pgpos=get_addr_pos(pos,pages,to_addr);
                                        if (pgpos!=oldpg) {
-                                               pg=pfn_to_page(to_addr[pgpos].
-                                                               sg->dma_address
+                                               pg=pfn_to_page(sg_dma_address(
+                                                               to_addr[pgpos].sg)
                                                                >> PAGE_SHIFT);
                                                kunmap_atomic(basep,
                                                                KM_BOUNCE_READ);
@@ -386,7 +386,7 @@ static void vivi_fillbuff(struct vivi_dev *dev,struct vivi_buffer *buf)
        struct timeval ts;
 
        /* Test if DMA mapping is ready */
-       if (!vb->dma.sglist[0].dma_address)
+       if (!sg_dma_address(&vb->dma.sglist[0]))
                return;
 
        prep_to_addr(to_addr,vb);
@@ -783,7 +783,7 @@ static int vivi_map_sg(void *dev, struct scatterlist *sg, int nents,
        for (i = 0; i < nents; i++ ) {
                BUG_ON(!sg[i].page);
 
-               sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
+               sg_dma_address(&sg[i]) = page_to_phys(sg[i].page) + sg[i].offset;
        }
 
        return nents;
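The vivi changes replace direct pokes at sg->dma_address with the
sg_dma_address() accessor, the portable way to read (and, in a dma_map_sg()
implementation, set) a scatterlist entry's bus address. A minimal sketch of
walking a mapped list; setup_dma_descriptor() is a hypothetical device
helper:

	int i;

	for (i = 0; i < nents; i++) {
		dma_addr_t addr = sg_dma_address(&sg[i]);
		unsigned int len = sg_dma_len(&sg[i]);

		/* hand each mapped segment to the (hypothetical) engine */
		setup_dma_descriptor(dev, addr, len);
	}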
index ac06f10c54ec1416c1a9b75fdbde20e23051be22..d96c687aee9373fe7638fa61bb88e335b58b65ea 100644 (file)
@@ -80,18 +80,26 @@ static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan);
  *     @dev: device to verify if it is a I2O Bus Adapter device
  *
  *     Because we want all Bus Adapters always return 0.
+ *     Except when we fail.  Then we are sad.
  *
- *     Returns 0.
+ *     Returns 0, except when we fail to excel.
  */
 static int i2o_bus_probe(struct device *dev)
 {
        struct i2o_device *i2o_dev = to_i2o_device(get_device(dev));
+       int rc;
 
-       device_create_file(dev, &dev_attr_scan);
+       rc = device_create_file(dev, &dev_attr_scan);
+       if (rc)
+               goto err_out;
 
        osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid);
 
        return 0;
+
+err_out:
+       put_device(dev);
+       return rc;
 };
 
 /**
index 7bd4d85d0b42db6af28def4072cbd11fbadbec86..91f95d172ca54097a0b2b14d767b11aa179fed74 100644 (file)
@@ -325,13 +325,24 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
 static int i2o_exec_probe(struct device *dev)
 {
        struct i2o_device *i2o_dev = to_i2o_device(dev);
+       int rc;
 
-       i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
+       rc = i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
+       if (rc) goto err_out;
 
-       device_create_file(dev, &dev_attr_vendor_id);
-       device_create_file(dev, &dev_attr_product_id);
+       rc = device_create_file(dev, &dev_attr_vendor_id);
+       if (rc) goto err_evtreg;
+       rc = device_create_file(dev, &dev_attr_product_id);
+       if (rc) goto err_vid;
 
        return 0;
+
+err_vid:
+       device_remove_file(dev, &dev_attr_vendor_id);
+err_evtreg:
+       i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
+err_out:
+       return rc;
 };
 
 /**
index 3df0e7a07c46c31be7e6635f09f51f5fd489955e..b6c045dc97b4e1331e426323aa4010daffe67e0f 100644 (file)
@@ -28,6 +28,17 @@ config IBM_ASM
 
          If unsure, say N.
 
+config SGI_IOC4
+       tristate "SGI IOC4 Base IO support"
+       ---help---
+         This option enables basic support for the IOC4 chip on certain
+         SGI IO controller cards (IO9, IO10, and PCI-RT).  This option
+         does not enable any specific functions on such a card, but provides
+         necessary infrastructure for other drivers to utilize.
+
+         If you have an SGI Altix with an IOC4-based card say Y.
+         Otherwise say N.
+
 config TIFM_CORE
        tristate "TI Flash Media interface support (EXPERIMENTAL)"
        depends on EXPERIMENTAL
@@ -57,4 +68,23 @@ config TIFM_7XX1
           To compile this driver as a module, choose M here: the module will
          be called tifm_7xx1.
 
+config MSI_LAPTOP
+        tristate "MSI Laptop Extras"
+        depends on X86
+        depends on ACPI_EC
+        depends on BACKLIGHT_CLASS_DEVICE
+        ---help---
+         This is a driver for laptops built by MSI (MICRO-STAR
+         INTERNATIONAL):
+
+         MSI MegaBook S270 (MS-1013)
+         Cytron/TCM/Medion/Tchibo MD96100/SAM2000
+
+         It adds support for Bluetooth, WLAN and LCD brightness control.
+
+         More information about this driver is available at
+         <http://0pointer.de/lennart/tchibo.html>.
+
+         If you have an MSI S270 laptop, say Y or M here.
+
 endmenu
index d65ece76095a43283de2714a8c7dc0ac9a0f1625..c9e98ab021c5b096658012057a8e58542bbb1eef 100644 (file)
@@ -5,6 +5,8 @@ obj- := misc.o  # Dummy rule to force built-in.o to be made
 
 obj-$(CONFIG_IBM_ASM)          += ibmasm/
 obj-$(CONFIG_HDPU_FEATURES)    += hdpuftrs/
+obj-$(CONFIG_MSI_LAPTOP)     += msi-laptop.o
 obj-$(CONFIG_LKDTM)            += lkdtm.o
 obj-$(CONFIG_TIFM_CORE)        += tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)        += tifm_7xx1.o
+obj-$(CONFIG_SGI_IOC4)         += ioc4.o
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
new file mode 100644 (file)
index 0000000..1c3c14a
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+/* This file contains the master driver module for use by SGI IOC4 subdrivers.
+ *
+ * It allocates any resources shared between multiple subdevices, and
+ * provides accessor functions (where needed) and the like for those
+ * resources.  It also provides a mechanism for the subdevice modules
+ * to support loading and unloading.
+ *
+ * Non-shared resources (e.g. external interrupt A_INT_OUT register page
+ * alias, serial port and UART registers) are handled by the subdevice
+ * modules themselves.
+ *
+ * This is all necessary because IOC4 is not implemented as a multi-function
+ * PCI device, but an amalgamation of disparate registers for several
+ * types of device (ATA, serial, external interrupts).  The normal
+ * resource management in the kernel doesn't have quite the right interfaces
+ * to handle this situation (e.g. multiple modules can't claim the same
+ * PCI ID), thus this IOC4 master module.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ioc4.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+/***************
+ * Definitions *
+ ***************/
+
+/* Tweakable values */
+
+/* PCI bus speed detection/calibration */
+#define IOC4_CALIBRATE_COUNT 63                /* Calibration cycle period */
+#define IOC4_CALIBRATE_CYCLES 256      /* Average over this many cycles */
+#define IOC4_CALIBRATE_DISCARD 2       /* Discard first few cycles */
+#define IOC4_CALIBRATE_LOW_MHZ 25      /* Lower bound on bus speed sanity */
+#define IOC4_CALIBRATE_HIGH_MHZ 75     /* Upper bound on bus speed sanity */
+#define IOC4_CALIBRATE_DEFAULT_MHZ 66  /* Assumed if sanity check fails */
+
+/************************
+ * Submodule management *
+ ************************/
+
+static DEFINE_MUTEX(ioc4_mutex);
+
+static LIST_HEAD(ioc4_devices);
+static LIST_HEAD(ioc4_submodules);
+
+/* Register an IOC4 submodule */
+int
+ioc4_register_submodule(struct ioc4_submodule *is)
+{
+       struct ioc4_driver_data *idd;
+
+       mutex_lock(&ioc4_mutex);
+       list_add(&is->is_list, &ioc4_submodules);
+
+       /* Initialize submodule for each IOC4 */
+       if (!is->is_probe)
+               goto out;
+
+       list_for_each_entry(idd, &ioc4_devices, idd_list) {
+               if (is->is_probe(idd)) {
+                       printk(KERN_WARNING
+                              "%s: IOC4 submodule %s probe failed "
+                              "for pci_dev %s",
+                              "for pci_dev %s.\n",
+                              pci_name(idd->idd_pdev));
+               }
+       }
+ out:
+       mutex_unlock(&ioc4_mutex);
+       return 0;
+}
+
+/* Unregister an IOC4 submodule */
+void
+ioc4_unregister_submodule(struct ioc4_submodule *is)
+{
+       struct ioc4_driver_data *idd;
+
+       mutex_lock(&ioc4_mutex);
+       list_del(&is->is_list);
+
+       /* Remove submodule for each IOC4 */
+       if (!is->is_remove)
+               goto out;
+
+       list_for_each_entry(idd, &ioc4_devices, idd_list) {
+               if (is->is_remove(idd)) {
+                       printk(KERN_WARNING
+                              "%s: IOC4 submodule %s remove failed "
+                              "for pci_dev %s.\n",
+                              __FUNCTION__, module_name(is->is_owner),
+                              pci_name(idd->idd_pdev));
+               }
+       }
+ out:
+       mutex_unlock(&ioc4_mutex);
+}
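+/* Usage sketch (illustrative only, not part of this file): a
+ * subdriver describes itself with an ioc4_submodule and registers
+ * it; its is_probe callback then runs for every IOC4 already on the
+ * device list and for each one probed later.  All names below are
+ * hypothetical.
+ *
+ *	static struct ioc4_submodule example_submodule = {
+ *		.is_name = "ioc4_example",
+ *		.is_owner = THIS_MODULE,
+ *		.is_probe = example_probe,
+ *		.is_remove = example_remove,
+ *	};
+ *
+ *	static int __init example_init(void)
+ *	{
+ *		return ioc4_register_submodule(&example_submodule);
+ *	}
+ */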
+
+/*********************
+ * Device management *
+ *********************/
+
+#define IOC4_CALIBRATE_LOW_LIMIT \
+       (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ)
+#define IOC4_CALIBRATE_HIGH_LIMIT \
+       (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ)
+#define IOC4_CALIBRATE_DEFAULT \
+       (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ)
+
+#define IOC4_CALIBRATE_END \
+       (IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD)
+
+#define IOC4_INT_OUT_MODE_TOGGLE 0x7   /* Toggle INT_OUT every COUNT+1 ticks */
+
+/* Determines external interrupt output clock period of the PCI bus an
+ * IOC4 is attached to.  This value can be used to determine the PCI
+ * bus speed.
+ *
+ * IOC4 has a design feature that various internal timers are derived from
+ * the PCI bus clock.  This causes IOC4 device drivers to need to take the
+ * bus speed into account when setting various register values (e.g. INT_OUT
+ * register COUNT field, UART divisors, etc).  Since this information is
+ * needed by several subdrivers, it is determined by the main IOC4 driver,
+ * even though the following code utilizes external interrupt registers
+ * to perform the speed calculation.
+ */
+static void
+ioc4_clock_calibrate(struct ioc4_driver_data *idd)
+{
+       union ioc4_int_out int_out;
+       union ioc4_gpcr gpcr;
+       unsigned int state, last_state = 1;
+       struct timespec start_ts, end_ts;
+       uint64_t start, end, period;
+       unsigned int count = 0;
+
+       /* Enable output */
+       gpcr.raw = 0;
+       gpcr.fields.dir = IOC4_GPCR_DIR_0;
+       gpcr.fields.int_out_en = 1;
+       writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw);
+
+       /* Reset to power-on state */
+       writel(0, &idd->idd_misc_regs->int_out.raw);
+       mmiowb();
+
+       /* Set up square wave */
+       int_out.raw = 0;
+       int_out.fields.count = IOC4_CALIBRATE_COUNT;
+       int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
+       int_out.fields.diag = 0;
+       writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
+       mmiowb();
+
+       /* Check square wave period averaged over some number of cycles */
+       do {
+               int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
+               state = int_out.fields.int_out;
+               if (!last_state && state) {
+                       count++;
+                       if (count == IOC4_CALIBRATE_END) {
+                               ktime_get_ts(&end_ts);
+                               break;
+                       } else if (count == IOC4_CALIBRATE_DISCARD)
+                               ktime_get_ts(&start_ts);
+               }
+               last_state = state;
+       } while (1);
+
+       /* Calculation rearranged to preserve intermediate precision.
+        * Logically:
+        * 1. "end - start" gives us the measurement period over all
+        *    the square wave cycles.
+        * 2. Divide by number of square wave cycles to get the period
+        *    of a square wave cycle.
+        * 3. Divide by 2*(int_out.fields.count+1), which is the formula
+        *    by which the IOC4 generates the square wave, to get the
+        *    period of an IOC4 INT_OUT count.
+        */
+       end = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
+       start = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
+       period = (end - start) /
+               (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1));
+
+       /* Bounds check the result. */
+       if (period > IOC4_CALIBRATE_LOW_LIMIT ||
+           period < IOC4_CALIBRATE_HIGH_LIMIT) {
+               printk(KERN_INFO
+                      "IOC4 %s: Clock calibration failed.  Assuming "
+                      "PCI clock is %d ns.\n",
+                      pci_name(idd->idd_pdev),
+                      IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
+               period = IOC4_CALIBRATE_DEFAULT;
+       } else {
+               u64 ns = period;
+
+               do_div(ns, IOC4_EXTINT_COUNT_DIVISOR);
+               printk(KERN_DEBUG
+                      "IOC4 %s: PCI clock is %lld ns.\n",
+                      pci_name(idd->idd_pdev), ns);
+       }
+
+       /* Remember results.  We store the extint clock period rather
+        * than the PCI clock period so that greater precision is
+        * retained.  Divide by IOC4_EXTINT_COUNT_DIVISOR to get
+        * PCI clock period.
+        */
+       idd->count_period = period;
+}
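+/* Worked numbers (comment only): the loop above counts 256 measured
+ * square-wave cycles, each spanning 2 * (IOC4_CALIBRATE_COUNT + 1)
+ * = 128 extint counts, so the elapsed nanoseconds are divided by
+ * 256 * 128 = 32768 to recover one extint count period.  Dividing
+ * that by IOC4_EXTINT_COUNT_DIVISOR gives the PCI clock period,
+ * e.g. roughly 1000/66 ~= 15 ns on a 66 MHz bus, which is exactly
+ * what the IOC4_CALIBRATE_DEFAULT fallback encodes.
+ */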
+
+/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
+ * Each brings out a different combination of IOC4 signals, thus
+ * the IOC4 subdrivers need to know to which we're attached.
+ *
+ * We look for the presence of a SCSI (IO9) or SATA (IO10) controller
+ * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
+ * If neither is present, it's a PCI-RT.
+ */
+static unsigned int
+ioc4_variant(struct ioc4_driver_data *idd)
+{
+       struct pci_dev *pdev = NULL;
+       int found = 0;
+
+       /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
+       do {
+               pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
+                                     PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
+               if (pdev &&
+                   idd->idd_pdev->bus->number == pdev->bus->number &&
+                   3 == PCI_SLOT(pdev->devfn))
+                       found = 1;
+               pci_dev_put(pdev);
+       } while (pdev && !found);
+       if (NULL != pdev)
+               return IOC4_VARIANT_IO9;
+
+       /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
+       pdev = NULL;
+       do {
+               pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
+                                     PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
+               if (pdev &&
+                   idd->idd_pdev->bus->number == pdev->bus->number &&
+                   3 == PCI_SLOT(pdev->devfn))
+                       found = 1;
+               pci_dev_put(pdev);
+       } while (pdev && !found);
+       if (NULL != pdev)
+               return IOC4_VARIANT_IO10;
+
+       /* PCI-RT: No SCSI/SATA controller will be present */
+       return IOC4_VARIANT_PCI_RT;
+}
+
+/* Adds a new instance of an IOC4 card */
+static int
+ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+{
+       struct ioc4_driver_data *idd;
+       struct ioc4_submodule *is;
+       uint32_t pcmd;
+       int ret;
+
+       /* Enable IOC4 and take ownership of it */
+       if ((ret = pci_enable_device(pdev))) {
+               printk(KERN_WARNING
+                      "%s: Failed to enable IOC4 device for pci_dev %s.\n",
+                      __FUNCTION__, pci_name(pdev));
+               goto out;
+       }
+       pci_set_master(pdev);
+
+       /* Set up per-IOC4 data */
+       idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL);
+       if (!idd) {
+               printk(KERN_WARNING
+                      "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
+                      __FUNCTION__, pci_name(pdev));
+               ret = -ENOMEM;
+               goto out_idd;
+       }
+       idd->idd_pdev = pdev;
+       idd->idd_pci_id = pci_id;
+
+       /* Map IOC4 misc registers.  These are shared between subdevices
+        * so the main IOC4 module manages them.
+        */
+       idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0);
+       if (!idd->idd_bar0) {
+               printk(KERN_WARNING
+                      "%s: Unable to find IOC4 misc resource "
+                      "for pci_dev %s.\n",
+                      __FUNCTION__, pci_name(idd->idd_pdev));
+               ret = -ENODEV;
+               goto out_pci;
+       }
+       if (!request_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs),
+                           "ioc4_misc")) {
+               printk(KERN_WARNING
+                      "%s: Unable to request IOC4 misc region "
+                      "for pci_dev %s.\n",
+                      __FUNCTION__, pci_name(idd->idd_pdev));
+               ret = -ENODEV;
+               goto out_idd;
+       }
+       idd->idd_misc_regs = ioremap(idd->idd_bar0,
+                                    sizeof(struct ioc4_misc_regs));
+       if (!idd->idd_misc_regs) {
+               printk(KERN_WARNING
+                      "%s: Unable to remap IOC4 misc region "
+                      "for pci_dev %s.\n",
+                      __FUNCTION__, pci_name(idd->idd_pdev));
+               ret = -ENODEV;
+               goto out_misc_region;
+       }
+
+       /* Failsafe portion of per-IOC4 initialization */
+
+       /* Detect card variant */
+       idd->idd_variant = ioc4_variant(idd);
+       printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
+              idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
+              idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
+              idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
+
+       /* Initialize IOC4 */
+       pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
+       pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
+                              pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+
+       /* Determine PCI clock */
+       ioc4_clock_calibrate(idd);
+
+       /* Disable/clear all interrupts.  Need to do this here lest
+        * one submodule request the shared IOC4 IRQ while an interrupt
+        * is still being generated by a different subdevice.
+        */
+       /* Disable */
+       writel(~0, &idd->idd_misc_regs->other_iec.raw);
+       writel(~0, &idd->idd_misc_regs->sio_iec);
+       /* Clear (i.e. acknowledge) */
+       writel(~0, &idd->idd_misc_regs->other_ir.raw);
+       writel(~0, &idd->idd_misc_regs->sio_ir);
+
+       /* Track PCI-device specific data */
+       idd->idd_serial_data = NULL;
+       pci_set_drvdata(idd->idd_pdev, idd);
+
+       mutex_lock(&ioc4_mutex);
+       list_add_tail(&idd->idd_list, &ioc4_devices);
+
+       /* Add this IOC4 to all submodules */
+       list_for_each_entry(is, &ioc4_submodules, is_list) {
+               if (is->is_probe && is->is_probe(idd)) {
+                       printk(KERN_WARNING
+                              "%s: IOC4 submodule %s probe failed "
+                              "for pci_dev %s.\n",
+                              __FUNCTION__, module_name(is->is_owner),
+                              pci_name(idd->idd_pdev));
+               }
+       }
+       mutex_unlock(&ioc4_mutex);
+
+       return 0;
+
+out_misc_region:
+       release_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
+out_idd:
+       kfree(idd);
+out_pci:
+       pci_disable_device(pdev);
+out:
+       return ret;
+}
+
+/* Removes a particular instance of an IOC4 card. */
+static void
+ioc4_remove(struct pci_dev *pdev)
+{
+       struct ioc4_submodule *is;
+       struct ioc4_driver_data *idd;
+
+       idd = pci_get_drvdata(pdev);
+
+       /* Remove this IOC4 from all submodules */
+       mutex_lock(&ioc4_mutex);
+       list_for_each_entry(is, &ioc4_submodules, is_list) {
+               if (is->is_remove && is->is_remove(idd)) {
+                       printk(KERN_WARNING
+                              "%s: IOC4 submodule %s remove failed "
+                              "for pci_dev %s.\n",
+                              __FUNCTION__, module_name(is->is_owner),
+                              pci_name(idd->idd_pdev));
+               }
+       }
+       mutex_unlock(&ioc4_mutex);
+
+       /* Release resources */
+       iounmap(idd->idd_misc_regs);
+       if (!idd->idd_bar0) {
+               printk(KERN_WARNING
+                      "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
+                      "Device removal may be incomplete.\n",
+                      __FUNCTION__, pci_name(idd->idd_pdev));
+       }
+       release_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
+
+       /* Disable IOC4 and relinquish */
+       pci_disable_device(pdev);
+
+       /* Remove and free driver data */
+       mutex_lock(&ioc4_mutex);
+       list_del(&idd->idd_list);
+       mutex_unlock(&ioc4_mutex);
+       kfree(idd);
+}
+
+static struct pci_device_id ioc4_id_table[] = {
+       {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
+        PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
+       {0}
+};
+
+static struct pci_driver ioc4_driver = {
+       .name = "IOC4",
+       .id_table = ioc4_id_table,
+       .probe = ioc4_probe,
+       .remove = ioc4_remove,
+};
+
+MODULE_DEVICE_TABLE(pci, ioc4_id_table);
+
+/*********************
+ * Module management *
+ *********************/
+
+/* Module load */
+static int __init
+ioc4_init(void)
+{
+       return pci_register_driver(&ioc4_driver);
+}
+
+/* Module unload */
+static void __exit
+ioc4_exit(void)
+{
+       pci_unregister_driver(&ioc4_driver);
+}
+
+module_init(ioc4_init);
+module_exit(ioc4_exit);
+
+MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>");
+MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(ioc4_register_submodule);
+EXPORT_SYMBOL(ioc4_unregister_submodule);
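
For readers new to this interface, the two exports above are the whole submodule API. Below is a minimal sketch of a hypothetical subdriver; the exact signatures of ioc4_register_submodule()/ioc4_unregister_submodule() and the declaring header are not shown in this hunk, so the assumption here is that both take a struct ioc4_submodule * (registration returning an int), mirroring the is_probe/is_remove hooks invoked from ioc4_probe() and ioc4_remove() above.

#include <linux/module.h>
#include <linux/ioc4.h>		/* assumed header declaring the submodule API */

/* Sketch only: per-card attach/detach callbacks for a hypothetical subdriver. */
static int mysub_attach(struct ioc4_driver_data *idd)
{
	/* Per-card setup; the shared registers live at idd->idd_misc_regs. */
	return 0;
}

static int mysub_detach(struct ioc4_driver_data *idd)
{
	return 0;
}

static struct ioc4_submodule mysub = {
	.is_probe  = mysub_attach,	/* called once per IOC4 card present */
	.is_remove = mysub_detach,	/* called when a card goes away */
	.is_owner  = THIS_MODULE,
};

static int __init mysub_init(void)
{
	return ioc4_register_submodule(&mysub);
}

static void __exit mysub_exit(void)
{
	ioc4_unregister_submodule(&mysub);
}

module_init(mysub_init);
module_exit(mysub_exit);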
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
new file mode 100644 (file)
index 0000000..fdb7153
--- /dev/null
@@ -0,0 +1,395 @@
+/*-*-linux-c-*-*/
+
+/*
+  Copyright (C) 2006 Lennart Poettering <mzxreary (at) 0pointer (dot) de>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+  02110-1301, USA.
+ */
+
+/*
+ * msi-laptop.c - MSI S270 laptop support. This laptop is sold under
+ * various brands, including "Cytron/TCM/Medion/Tchibo MD96100".
+ *
+ * This driver exports a few files in /sys/devices/platform/msi-laptop-pf/:
+ *
+ *   lcd_level - Screen brightness: contains a single integer in the
+ *   range 0..8. (rw)
+ *
+ *   auto_brightness - Enable automatic brightness control: contains
+ *   either 0 or 1. If set to 1 the hardware adjusts the screen
+ *   brightness automatically when the power cord is
+ *   plugged/unplugged. (rw)
+ *
+ *   wlan - WLAN subsystem enabled: contains either 0 or 1. (ro)
+ *
+ *   bluetooth - Bluetooth subsystem enabled: contains either 0 or 1.
+ *   Please note that this file is always 0 if no Bluetooth
+ *   hardware is available. (ro)
+ *
+ * In addition to these platform device attributes the driver
+ * registers itself in the Linux backlight control subsystem and is
+ * available to userspace under /sys/class/backlight/msi-laptop-bl/.
+ *
+ * This driver might work on other laptops produced by MSI. If you
+ * want to try it, you can pass force=1 as an argument to the module,
+ * which will force it to load even when the DMI data doesn't identify
+ * the laptop as MSI S270. YMMV.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/backlight.h>
+#include <linux/platform_device.h>
+#include <linux/autoconf.h>
+
+#define MSI_DRIVER_VERSION "0.5"
+
+#define MSI_LCD_LEVEL_MAX 9
+
+#define MSI_EC_COMMAND_WIRELESS 0x10
+#define MSI_EC_COMMAND_LCD_LEVEL 0x11
+
+static int force;
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
+
+static int auto_brightness;
+module_param(auto_brightness, int, 0);
+MODULE_PARM_DESC(auto_brightness, "Enable automatic brightness control (0: disabled; 1: enabled; 2: don't touch)");
+
+/* Hardware access */
+
+static int set_lcd_level(int level)
+{
+       u8 buf[2];
+
+       if (level < 0 || level >= MSI_LCD_LEVEL_MAX)
+               return -EINVAL;
+
+       buf[0] = 0x80;
+       buf[1] = (u8) (level*31);
+
+       return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0);
+}
+
+static int get_lcd_level(void)
+{
+       u8 wdata = 0, rdata;
+       int result;
+
+       result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1);
+       if (result < 0)
+               return result;
+
+       return (int) rdata / 31;
+}
+
+static int get_auto_brightness(void)
+{
+       u8 wdata = 4, rdata;
+       int result;
+
+       result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1);
+       if (result < 0)
+               return result;
+
+       return !!(rdata & 8);
+}
+
+static int set_auto_brightness(int enable)
+{
+       u8 wdata[2], rdata;
+       int result;
+
+       wdata[0] = 4;
+
+       result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1);
+       if (result < 0)
+               return result;
+
+       wdata[0] = 0x84;
+       wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
+
+       return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0);
+}
+
+static int get_wireless_state(int *wlan, int *bluetooth)
+{
+       u8 wdata = 0, rdata;
+       int result;
+
+       result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
+       if (result < 0)
+               return result;
+
+       if (wlan)
+               *wlan = !!(rdata & 8);
+
+       if (bluetooth)
+               *bluetooth = !!(rdata & 128);
+
+       return 0;
+}
+
+/* Backlight device stuff */
+
+static int bl_get_brightness(struct backlight_device *b)
+{
+       return get_lcd_level();
+}
+
+
+static int bl_update_status(struct backlight_device *b)
+{
+       return set_lcd_level(b->props->brightness);
+}
+
+static struct backlight_properties msibl_props = {
+       .owner          = THIS_MODULE,
+       .get_brightness = bl_get_brightness,
+       .update_status  = bl_update_status,
+       .max_brightness = MSI_LCD_LEVEL_MAX-1,
+};
+
+static struct backlight_device *msibl_device;
+
+/* Platform device */
+
+static ssize_t show_wlan(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       int ret, enabled;
+
+       ret = get_wireless_state(&enabled, NULL);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%i\n", enabled);
+}
+
+static ssize_t show_bluetooth(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       int ret, enabled;
+
+       ret = get_wireless_state(NULL, &enabled);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%i\n", enabled);
+}
+
+static ssize_t show_lcd_level(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       int ret;
+
+       ret = get_lcd_level();
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t store_lcd_level(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+
+       int level, ret;
+
+       if (sscanf(buf, "%i", &level) != 1 || (level < 0 || level >= MSI_LCD_LEVEL_MAX))
+               return -EINVAL;
+
+       ret = set_lcd_level(level);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static ssize_t show_auto_brightness(struct device *dev,
+       struct device_attribute *attr, char *buf)
+{
+
+       int ret;
+
+       ret = get_auto_brightness();
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t store_auto_brightness(struct device *dev,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+
+       int enable, ret;
+
+       if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
+               return -EINVAL;
+
+       ret = set_auto_brightness(enable);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
+static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness, store_auto_brightness);
+static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL);
+static DEVICE_ATTR(wlan, 0444, show_wlan, NULL);
+
+static struct attribute *msipf_attributes[] = {
+       &dev_attr_lcd_level.attr,
+       &dev_attr_auto_brightness.attr,
+       &dev_attr_bluetooth.attr,
+       &dev_attr_wlan.attr,
+       NULL
+};
+
+static struct attribute_group msipf_attribute_group = {
+       .attrs = msipf_attributes
+};
+
+static struct platform_driver msipf_driver = {
+       .driver = {
+               .name = "msi-laptop-pf",
+               .owner = THIS_MODULE,
+       }
+};
+
+static struct platform_device *msipf_device;
+
+/* Initialization */
+
+static struct dmi_system_id __initdata msi_dmi_table[] = {
+       {
+               .ident = "MSI S270",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MS-1013"),
+               }
+       },
+       {
+               .ident = "Medion MD96100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "NOTEBOOK"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "SAM2000"),
+               }
+       },
+       { }
+};
+
+
+static int __init msi_init(void)
+{
+       int ret;
+
+       if (acpi_disabled)
+               return -ENODEV;
+
+       if (!force && !dmi_check_system(msi_dmi_table))
+               return -ENODEV;
+
+       if (auto_brightness < 0 || auto_brightness > 2)
+               return -EINVAL;
+
+       /* Register backlight stuff */
+
+       msibl_device = backlight_device_register("msi-laptop-bl", NULL, &msibl_props);
+       if (IS_ERR(msibl_device))
+               return PTR_ERR(msibl_device);
+
+       ret = platform_driver_register(&msipf_driver);
+       if (ret)
+               goto fail_backlight;
+
+       /* Register platform stuff */
+
+       msipf_device = platform_device_alloc("msi-laptop-pf", -1);
+       if (!msipf_device) {
+               ret = -ENOMEM;
+               goto fail_platform_driver;
+       }
+
+       ret = platform_device_add(msipf_device);
+       if (ret)
+               goto fail_platform_device1;
+
+       ret = sysfs_create_group(&msipf_device->dev.kobj, &msipf_attribute_group);
+       if (ret)
+               goto fail_platform_device2;
+
+       /* Disable automatic brightness control by default because
+        * this module was probably loaded to do brightness control in
+        * software. */
+
+       if (auto_brightness != 2)
+               set_auto_brightness(auto_brightness);
+
+       printk(KERN_INFO "msi-laptop: driver "MSI_DRIVER_VERSION" successfully loaded.\n");
+
+       return 0;
+
+fail_platform_device2:
+
+       platform_device_del(msipf_device);
+
+fail_platform_device1:
+
+       platform_device_put(msipf_device);
+
+fail_platform_driver:
+
+       platform_driver_unregister(&msipf_driver);
+
+fail_backlight:
+
+       backlight_device_unregister(msibl_device);
+
+       return ret;
+}
+
+static void __exit msi_cleanup(void)
+{
+
+       sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
+       platform_device_unregister(msipf_device);
+       platform_driver_unregister(&msipf_driver);
+       backlight_device_unregister(msibl_device);
+
+       /* Enable automatic brightness control again */
+       if (auto_brightness != 2)
+               set_auto_brightness(1);
+
+       printk(KERN_INFO "msi-laptop: driver unloaded.\n");
+}
+
+module_init(msi_init);
+module_exit(msi_cleanup);
+
+MODULE_AUTHOR("Lennart Poettering");
+MODULE_DESCRIPTION("MSI Laptop Support");
+MODULE_VERSION(MSI_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
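
As a usage note for the sysfs interface documented in the file's header comment, here is a hedged userspace sketch (a standalone program, not part of this patch); it assumes the module is loaded and that the attribute paths are exactly as described above.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/platform/msi-laptop-pf/lcd_level";
	FILE *f = fopen(path, "r+");
	int level;

	if (!f || fscanf(f, "%d", &level) != 1)
		return 1;
	printf("current lcd_level: %d\n", level);

	rewind(f);			/* reposition before switching from reading to writing */
	fprintf(f, "%d\n", level);	/* the driver accepts any value in 0..8 */
	fclose(f);
	return 0;
}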
index b124eee4eb10decbade62f95f03a396a546e8d71..1ec217433b4cdc30fcbebe35b1df22b31d3df1ad 100644 (file)
@@ -1706,14 +1706,15 @@ static void __b44_set_rx_mode(struct net_device *dev)
 
                __b44_set_mac_addr(bp);
 
-               if (dev->flags & IFF_ALLMULTI)
+               if ((dev->flags & IFF_ALLMULTI) ||
+                   (dev->mc_count > B44_MCAST_TABLE_SIZE))
                        val |= RXCONFIG_ALLMULTI;
                else
                        i = __b44_load_mcast(bp, dev);
 
-               for (; i < 64; i++) {
+               for (; i < 64; i++)
                        __b44_cam_write(bp, zero, i);
-               }
+
                bw32(bp, B44_RXCONFIG, val);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
@@ -2055,7 +2056,7 @@ static int b44_read_eeprom(struct b44 *bp, u8 *data)
        u16 *ptr = (u16 *) data;
 
        for (i = 0; i < 128; i += 2)
-               ptr[i / 2] = readw(bp->regs + 4096 + i);
+               ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
 
        return 0;
 }
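
The cpu_to_le16() added in the eeprom hunk is the substantive fix: readw() returns the 16-bit value in CPU byte order, while the EEPROM image handed back through ethtool is defined as a little-endian byte stream, so big-endian hosts previously returned byte-swapped data. A sketch of the distinction, using the same calls as the hunk:

/* On a little-endian CPU both forms store the same bytes; on a
 * big-endian CPU only the converted form matches the device's
 * on-wire layout. */
u16 cpu_order = readw(bp->regs + 4096 + i);	/* value in CPU byte order */
ptr[i / 2] = cpu_to_le16(cpu_order);		/* stored as little-endian */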
index e83bc825f6afc4d5346cbb3d70f999e4b5d44e75..32923162179ef8b45948149972343744144694ec 100644 (file)
@@ -1433,7 +1433,7 @@ void bond_alb_monitor(struct bonding *bond)
                 * write lock to protect from other code that also
                 * sets the promiscuity.
                 */
-               write_lock(&bond->curr_slave_lock);
+               write_lock_bh(&bond->curr_slave_lock);
 
                if (bond_info->primary_is_promisc &&
                    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
@@ -1448,7 +1448,7 @@ void bond_alb_monitor(struct bonding *bond)
                        bond_info->primary_is_promisc = 0;
                }
 
-               write_unlock(&bond->curr_slave_lock);
+               write_unlock_bh(&bond->curr_slave_lock);
 
                if (bond_info->rlb_rebalance) {
                        bond_info->rlb_rebalance = 0;
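
The switch to the _bh lock variants closes a deadlock class: bond_alb_monitor() runs in process/timer context, and if softirq code on the same CPU can take curr_slave_lock, a plain write_lock() holder could be interrupted by that softirq and spin on its own lock. Conceptually, the _bh variants expand to the following (a sketch of semantics, not the literal implementation):

write_lock_bh(&bond->curr_slave_lock);	 /* == local_bh_disable() + write_lock() */
/* ... critical section, safe from local softirqs ... */
write_unlock_bh(&bond->curr_slave_lock); /* == write_unlock() + local_bh_enable() */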
index 23b451a8ae120f528fde09f0d131104645931fd3..b40724fc6b74e713a4bcee6213fe3b567def362d 100644 (file)
@@ -39,7 +39,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME       "ehea"
-#define DRV_VERSION    "EHEA_0028"
+#define DRV_VERSION    "EHEA_0034"
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
        | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -50,6 +50,7 @@
 #define EHEA_MAX_ENTRIES_SQ  32767
 #define EHEA_MIN_ENTRIES_QP  127
 
+#define EHEA_SMALL_QUEUES
 #define EHEA_NUM_TX_QP 1
 
 #ifdef EHEA_SMALL_QUEUES
 #define EHEA_DEF_ENTRIES_RQ2    1023
 #define EHEA_DEF_ENTRIES_RQ3    1023
 #else
-#define EHEA_MAX_CQE_COUNT     32000
-#define EHEA_DEF_ENTRIES_SQ    16000
-#define EHEA_DEF_ENTRIES_RQ1   32080
-#define EHEA_DEF_ENTRIES_RQ2    4020
-#define EHEA_DEF_ENTRIES_RQ3    4020
+#define EHEA_MAX_CQE_COUNT      4080
+#define EHEA_DEF_ENTRIES_SQ     4080
+#define EHEA_DEF_ENTRIES_RQ1    8160
+#define EHEA_DEF_ENTRIES_RQ2    2040
+#define EHEA_DEF_ENTRIES_RQ3    2040
 #endif
 
 #define EHEA_MAX_ENTRIES_EQ 20
index c6b31775e26b7c5bb87684358afb596c1dfe38fc..eb7d44de59ff1aa91e9a833c3bab423430a7359d 100644 (file)
@@ -766,7 +766,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(port->netdev)) {
                                ret = ehea_sense_port_attr(
-                                       adapter->port[portnum]);
+                                       port);
                                if (ret) {
                                        ehea_error("failed resensing port "
                                                   "attributes");
@@ -818,7 +818,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
                netif_stop_queue(port->netdev);
                break;
        default:
-               ehea_error("unknown event code %x", ec);
+               ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
                break;
        }
 }
@@ -1841,7 +1841,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (netif_msg_tx_queued(port)) {
                ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
-               ehea_dump(swqe, sizeof(*swqe), "swqe");
+               ehea_dump(swqe, 512, "swqe");
        }
 
        ehea_post_swqe(pr->qp, swqe);
index 4a85aca4c7e90c1890942754d2cd3e43cebb483e..0b51a8cea0775209ee9cc9e8c6c93277a05efbe9 100644 (file)
@@ -44,71 +44,99 @@ static inline u16 get_order_of_qentries(u16 queue_entries)
 #define H_ALL_RES_TYPE_MR        5
 #define H_ALL_RES_TYPE_MW        6
 
-static long ehea_hcall_9arg_9ret(unsigned long opcode,
-                                unsigned long arg1, unsigned long arg2,
-                                unsigned long arg3, unsigned long arg4,
-                                unsigned long arg5, unsigned long arg6,
-                                unsigned long arg7, unsigned long arg8,
-                                unsigned long arg9, unsigned long *out1,
-                                unsigned long *out2,unsigned long *out3,
-                                unsigned long *out4,unsigned long *out5,
-                                unsigned long *out6,unsigned long *out7,
-                                unsigned long *out8,unsigned long *out9)
+static long ehea_plpar_hcall_norets(unsigned long opcode,
+                                   unsigned long arg1,
+                                   unsigned long arg2,
+                                   unsigned long arg3,
+                                   unsigned long arg4,
+                                   unsigned long arg5,
+                                   unsigned long arg6,
+                                   unsigned long arg7)
 {
-       long hret;
+       long ret;
        int i, sleep_msecs;
 
        for (i = 0; i < 5; i++) {
-               hret = plpar_hcall_9arg_9ret(opcode,arg1, arg2, arg3, arg4,
-                                            arg5, arg6, arg7, arg8, arg9, out1,
-                                            out2, out3, out4, out5, out6, out7,
-                                            out8, out9);
-               if (H_IS_LONG_BUSY(hret)) {
-                       sleep_msecs = get_longbusy_msecs(hret);
+               ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
+                                        arg5, arg6, arg7);
+
+               if (H_IS_LONG_BUSY(ret)) {
+                       sleep_msecs = get_longbusy_msecs(ret);
                        msleep_interruptible(sleep_msecs);
                        continue;
                }
 
-               if (hret < H_SUCCESS)
-                       ehea_error("op=%lx hret=%lx "
-                                  "i1=%lx i2=%lx i3=%lx i4=%lx i5=%lx i6=%lx "
-                                  "i7=%lx i8=%lx i9=%lx "
-                                  "o1=%lx o2=%lx o3=%lx o4=%lx o5=%lx o6=%lx "
-                                  "o7=%lx o8=%lx o9=%lx",
-                                  opcode, hret, arg1, arg2, arg3, arg4, arg5,
-                                  arg6, arg7, arg8, arg9, *out1, *out2, *out3,
-                                  *out4, *out5, *out6, *out7, *out8, *out9);
-               return hret;
+               if (ret < H_SUCCESS)
+                       ehea_error("opcode=%lx ret=%lx"
+                                  " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+                                  " arg5=%lx arg6=%lx arg7=%lx",
+                                  opcode, ret,
+                                  arg1, arg2, arg3, arg4, arg5,
+                                  arg6, arg7);
+
+               return ret;
        }
+
        return H_BUSY;
 }
 
-u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
-                        const u64 qp_handle, const u64 sel_mask, void *cb_addr)
+static long ehea_plpar_hcall9(unsigned long opcode,
+                             unsigned long *outs, /* array of 9 outputs */
+                             unsigned long arg1,
+                             unsigned long arg2,
+                             unsigned long arg3,
+                             unsigned long arg4,
+                             unsigned long arg5,
+                             unsigned long arg6,
+                             unsigned long arg7,
+                             unsigned long arg8,
+                             unsigned long arg9)
 {
-       u64 dummy;
+       long ret;
+       int i, sleep_msecs;
 
-       if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
-               ehea_error("not on pageboundary");
-               return H_PARAMETER;
+       for (i = 0; i < 5; i++) {
+               ret = plpar_hcall9(opcode, outs,
+                                  arg1, arg2, arg3, arg4, arg5,
+                                  arg6, arg7, arg8, arg9);
+
+               if (H_IS_LONG_BUSY(ret)) {
+                       sleep_msecs = get_longbusy_msecs(ret);
+                       msleep_interruptible(sleep_msecs);
+                       continue;
+               }
+
+               if (ret < H_SUCCESS)
+                       ehea_error("opcode=%lx ret=%lx"
+                                  " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+                                  " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+                                  " arg9=%lx"
+                                  " out1=%lx out2=%lx out3=%lx out4=%lx"
+                                  " out5=%lx out6=%lx out7=%lx out8=%lx"
+                                  " out9=%lx",
+                                  opcode, ret,
+                                  arg1, arg2, arg3, arg4, arg5,
+                                  arg6, arg7, arg8, arg9,
+                                  outs[0], outs[1], outs[2], outs[3],
+                                  outs[4], outs[5], outs[6], outs[7],
+                                  outs[8]);
+
+               return ret;
        }
 
-       return ehea_hcall_9arg_9ret(H_QUERY_HEA_QP,
-                                   adapter_handle,             /* R4 */
-                                   qp_category,                /* R5 */
-                                   qp_handle,                  /* R6 */
-                                   sel_mask,                   /* R7 */
-                                   virt_to_abs(cb_addr),       /* R8 */
-                                   0, 0, 0, 0,                 /* R9-R12 */
-                                   &dummy,                     /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &dummy,                     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
+       return H_BUSY;
+}
+
+u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
+                        const u64 qp_handle, const u64 sel_mask, void *cb_addr)
+{
+       return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
+                                      adapter_handle,          /* R4 */
+                                      qp_category,             /* R5 */
+                                      qp_handle,               /* R6 */
+                                      sel_mask,                /* R7 */
+                                      virt_to_abs(cb_addr),    /* R8 */
+                                      0, 0);
 }
 
 /* input param R5 */
@@ -180,6 +208,7 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
                             u64 *qp_handle, struct h_epas *h_epas)
 {
        u64 hret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
 
        u64 allocate_controls =
            EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
@@ -219,45 +248,29 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
            EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
            | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
 
-       u64 r5_out = 0;
-       u64 r6_out = 0;
-       u64 r7_out = 0;
-       u64 r8_out = 0;
-       u64 r9_out = 0;
-       u64 g_la_user_out = 0;
-       u64 r11_out = 0;
-       u64 r12_out = 0;
-
-       hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-                                   adapter_handle,             /* R4 */
-                                   allocate_controls,          /* R5 */
-                                   init_attr->send_cq_handle,  /* R6 */
-                                   init_attr->recv_cq_handle,  /* R7 */
-                                   init_attr->aff_eq_handle,   /* R8 */
-                                   r9_reg,                     /* R9 */
-                                   max_r10_reg,                /* R10 */
-                                   r11_in,                     /* R11 */
-                                   threshold,                  /* R12 */
-                                   qp_handle,                  /* R4 */
-                                   &r5_out,                    /* R5 */
-                                   &r6_out,                    /* R6 */
-                                   &r7_out,                    /* R7 */
-                                   &r8_out,                    /* R8 */
-                                   &r9_out,                    /* R9 */
-                                   &g_la_user_out,             /* R10 */
-                                   &r11_out,                   /* R11 */
-                                   &r12_out);                  /* R12 */
-
-       init_attr->qp_nr = (u32)r5_out;
+       hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+                                outs,
+                                adapter_handle,                /* R4 */
+                                allocate_controls,             /* R5 */
+                                init_attr->send_cq_handle,     /* R6 */
+                                init_attr->recv_cq_handle,     /* R7 */
+                                init_attr->aff_eq_handle,      /* R8 */
+                                r9_reg,                        /* R9 */
+                                max_r10_reg,                   /* R10 */
+                                r11_in,                        /* R11 */
+                                threshold);                    /* R12 */
+
+       *qp_handle = outs[0];
+       init_attr->qp_nr = (u32)outs[1];
 
        init_attr->act_nr_send_wqes =
-           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, r6_out);
+           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
        init_attr->act_nr_rwqes_rq1 =
-           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, r6_out);
+           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
        init_attr->act_nr_rwqes_rq2 =
-           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, r6_out);
+           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
        init_attr->act_nr_rwqes_rq3 =
-           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, r6_out);
+           (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
 
        init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
        init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
@@ -265,25 +278,25 @@ u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
        init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
 
        init_attr->nr_sq_pages =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, r8_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
        init_attr->nr_rq1_pages =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, r8_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
        init_attr->nr_rq2_pages =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, r9_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
        init_attr->nr_rq3_pages =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, r9_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
 
        init_attr->liobn_sq =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, r11_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
        init_attr->liobn_rq1 =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, r11_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
        init_attr->liobn_rq2 =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, r12_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
        init_attr->liobn_rq3 =
-           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, r12_out);
+           (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
 
        if (!hret)
-               hcp_epas_ctor(h_epas, g_la_user_out, g_la_user_out);
+               hcp_epas_ctor(h_epas, outs[6], outs[6]);
 
        return hret;
 }
@@ -292,31 +305,24 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
                             struct ehea_cq_attr *cq_attr,
                             u64 *cq_handle, struct h_epas *epas)
 {
-       u64 hret, dummy, act_nr_of_cqes_out, act_pages_out;
-       u64 g_la_privileged_out, g_la_user_out;
-
-       hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-                                   adapter_handle,             /* R4 */
-                                   H_ALL_RES_TYPE_CQ,          /* R5 */
-                                   cq_attr->eq_handle,         /* R6 */
-                                   cq_attr->cq_token,          /* R7 */
-                                   cq_attr->max_nr_of_cqes,    /* R8 */
-                                   0, 0, 0, 0,                 /* R9-R12 */
-                                   cq_handle,                  /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &act_nr_of_cqes_out,        /* R7 */
-                                   &act_pages_out,             /* R8 */
-                                   &g_la_privileged_out,       /* R9 */
-                                   &g_la_user_out,             /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
-
-       cq_attr->act_nr_of_cqes = act_nr_of_cqes_out;
-       cq_attr->nr_pages = act_pages_out;
+       u64 hret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+                                outs,
+                                adapter_handle,                /* R4 */
+                                H_ALL_RES_TYPE_CQ,             /* R5 */
+                                cq_attr->eq_handle,            /* R6 */
+                                cq_attr->cq_token,             /* R7 */
+                                cq_attr->max_nr_of_cqes,       /* R8 */
+                                0, 0, 0, 0);                   /* R9-R12 */
+
+       *cq_handle = outs[0];
+       cq_attr->act_nr_of_cqes = outs[3];
+       cq_attr->nr_pages = outs[4];
 
        if (!hret)
-               hcp_epas_ctor(epas, g_la_privileged_out, g_la_user_out);
+               hcp_epas_ctor(epas, outs[5], outs[6]);
 
        return hret;
 }
@@ -361,9 +367,8 @@ u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
                             struct ehea_eq_attr *eq_attr, u64 *eq_handle)
 {
-       u64 hret, dummy, eq_liobn, allocate_controls;
-       u64 ist1_out, ist2_out, ist3_out, ist4_out;
-       u64 act_nr_of_eqes_out, act_pages_out;
+       u64 hret, allocate_controls;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
 
        /* resource type */
        allocate_controls =
@@ -372,27 +377,20 @@ u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
            | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
            | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
 
-       hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-                                   adapter_handle,             /* R4 */
-                                   allocate_controls,          /* R5 */
-                                   eq_attr->max_nr_of_eqes,    /* R6 */
-                                   0, 0, 0, 0, 0, 0,           /* R7-R10 */
-                                   eq_handle,                  /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &eq_liobn,                  /* R6 */
-                                   &act_nr_of_eqes_out,        /* R7 */
-                                   &act_pages_out,             /* R8 */
-                                   &ist1_out,                  /* R9 */
-                                   &ist2_out,                  /* R10 */
-                                   &ist3_out,                  /* R11 */
-                                   &ist4_out);                 /* R12 */
-
-       eq_attr->act_nr_of_eqes = act_nr_of_eqes_out;
-       eq_attr->nr_pages = act_pages_out;
-       eq_attr->ist1 = ist1_out;
-       eq_attr->ist2 = ist2_out;
-       eq_attr->ist3 = ist3_out;
-       eq_attr->ist4 = ist4_out;
+       hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+                                outs,
+                                adapter_handle,                /* R4 */
+                                allocate_controls,             /* R5 */
+                                eq_attr->max_nr_of_eqes,       /* R6 */
+                                0, 0, 0, 0, 0, 0);             /* R7-R12 */
+
+       *eq_handle = outs[0];
+       eq_attr->act_nr_of_eqes = outs[3];
+       eq_attr->nr_pages = outs[4];
+       eq_attr->ist1 = outs[5];
+       eq_attr->ist2 = outs[6];
+       eq_attr->ist3 = outs[7];
+       eq_attr->ist4 = outs[8];
 
        return hret;
 }
@@ -402,31 +400,22 @@ u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
                          void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
                          u16 *out_swr, u16 *out_rwr)
 {
-       u64 hret, dummy, act_out_swr, act_out_rwr;
-
-       if ((((u64)cb_addr) & (PAGE_SIZE - 1)) != 0) {
-               ehea_error("not on page boundary");
-               return H_PARAMETER;
-       }
-
-       hret = ehea_hcall_9arg_9ret(H_MODIFY_HEA_QP,
-                                   adapter_handle,             /* R4 */
-                                   (u64) cat,                  /* R5 */
-                                   qp_handle,                  /* R6 */
-                                   sel_mask,                   /* R7 */
-                                   virt_to_abs(cb_addr),       /* R8 */
-                                   0, 0, 0, 0,                 /* R9-R12 */
-                                   inv_attr_id,                /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &act_out_swr,               /* R7 */
-                                   &act_out_rwr,               /* R8 */
-                                   proc_mask,                  /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
-       *out_swr = act_out_swr;
-       *out_rwr = act_out_rwr;
+       u64 hret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
+                                outs,
+                                adapter_handle,                /* R4 */
+                                (u64) cat,                     /* R5 */
+                                qp_handle,                     /* R6 */
+                                sel_mask,                      /* R7 */
+                                virt_to_abs(cb_addr),          /* R8 */
+                                0, 0, 0, 0);                   /* R9-R12 */
+
+       *inv_attr_id = outs[0];
+       *out_swr = outs[3];
+       *out_rwr = outs[4];
+       *proc_mask = outs[5];
 
        return hret;
 }
@@ -435,122 +424,81 @@ u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
                          const u8 queue_type, const u64 resource_handle,
                          const u64 log_pageaddr, u64 count)
 {
-       u64 dummy, reg_control;
+       u64 reg_control;
 
        reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
                    | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
 
-       return ehea_hcall_9arg_9ret(H_REGISTER_HEA_RPAGES,
-                                   adapter_handle,             /* R4 */
-                                   reg_control,                /* R5 */
-                                   resource_handle,            /* R6 */
-                                   log_pageaddr,               /* R7 */
-                                   count,                      /* R8 */
-                                   0, 0, 0, 0,                 /* R9-R12 */
-                                   &dummy,                     /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &dummy,                     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
+       return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
+                                      adapter_handle,          /* R4 */
+                                      reg_control,             /* R5 */
+                                      resource_handle,         /* R6 */
+                                      log_pageaddr,            /* R7 */
+                                      count,                   /* R8 */
+                                      0, 0);                   /* R9-R10 */
 }
 
 u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
                        const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
                        struct ehea_mr *mr)
 {
-       u64 hret, dummy, lkey_out;
-
-       hret = ehea_hcall_9arg_9ret(H_REGISTER_SMR,
-                                   adapter_handle       ,          /* R4 */
-                                   orig_mr_handle,                 /* R5 */
-                                   vaddr_in,                       /* R6 */
-                                   (((u64)access_ctrl) << 32ULL),  /* R7 */
-                                   pd,                             /* R8 */
-                                   0, 0, 0, 0,                     /* R9-R12 */
-                                   &mr->handle,                    /* R4 */
-                                   &dummy,                         /* R5 */
-                                   &lkey_out,                      /* R6 */
-                                   &dummy,                         /* R7 */
-                                   &dummy,                         /* R8 */
-                                   &dummy,                         /* R9 */
-                                   &dummy,                         /* R10 */
-                                   &dummy,                         /* R11 */
-                                   &dummy);                        /* R12 */
-       mr->lkey = (u32)lkey_out;
+       u64 hret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       hret = ehea_plpar_hcall9(H_REGISTER_SMR,
+                                outs,
+                                adapter_handle,                 /* R4 */
+                                orig_mr_handle,                 /* R5 */
+                                vaddr_in,                       /* R6 */
+                                (((u64)access_ctrl) << 32ULL),  /* R7 */
+                                pd,                             /* R8 */
+                                0, 0, 0, 0);                    /* R9-R12 */
+
+       mr->handle = outs[0];
+       mr->lkey = (u32)outs[2];
 
        return hret;
 }
 
 u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
 {
-       u64 hret, dummy, ladr_next_sq_wqe_out;
-       u64 ladr_next_rq1_wqe_out, ladr_next_rq2_wqe_out, ladr_next_rq3_wqe_out;
-
-       hret = ehea_hcall_9arg_9ret(H_DISABLE_AND_GET_HEA,
-                                   adapter_handle,             /* R4 */
-                                   H_DISABLE_GET_EHEA_WQE_P,   /* R5 */
-                                   qp_handle,                  /* R6 */
-                                   0, 0, 0, 0, 0, 0,           /* R7-R12 */
-                                   &ladr_next_sq_wqe_out,      /* R4 */
-                                   &ladr_next_rq1_wqe_out,     /* R5 */
-                                   &ladr_next_rq2_wqe_out,     /* R6 */
-                                   &ladr_next_rq3_wqe_out,     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
-       return hret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
+                                outs,
+                                adapter_handle,                /* R4 */
+                                H_DISABLE_GET_EHEA_WQE_P,      /* R5 */
+                                qp_handle,                     /* R6 */
+                                0, 0, 0, 0, 0, 0);             /* R7-R12 */
 }
 
 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle)
 {
-       u64 dummy;
-
-       return ehea_hcall_9arg_9ret(H_FREE_RESOURCE,
-                                   adapter_handle,        /* R4 */
-                                   res_handle,            /* R5 */
-                                   0, 0, 0, 0, 0, 0, 0,   /* R6-R12 */
-                                   &dummy,                /* R4 */
-                                   &dummy,                /* R5 */
-                                   &dummy,                /* R6 */
-                                   &dummy,                /* R7 */
-                                   &dummy,                /* R8 */
-                                   &dummy,                /* R9 */
-                                   &dummy,                /* R10 */
-                                   &dummy,                /* R11 */
-                                   &dummy);               /* R12 */
+       return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
+                                      adapter_handle,     /* R4 */
+                                      res_handle,         /* R5 */
+                                      0, 0, 0, 0, 0);     /* R6-R10 */
 }
 
 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
                             const u64 length, const u32 access_ctrl,
                             const u32 pd, u64 *mr_handle, u32 *lkey)
 {
-       u64 hret, dummy, lkey_out;
-
-       hret = ehea_hcall_9arg_9ret(H_ALLOC_HEA_RESOURCE,
-                                   adapter_handle,                /* R4 */
-                                   5,                             /* R5 */
-                                   vaddr,                         /* R6 */
-                                   length,                        /* R7 */
-                                   (((u64) access_ctrl) << 32ULL),/* R8 */
-                                   pd,                            /* R9 */
-                                   0, 0, 0,                       /* R10-R12 */
-                                   mr_handle,                     /* R4 */
-                                   &dummy,                        /* R5 */
-                                   &lkey_out,                     /* R6 */
-                                   &dummy,                        /* R7 */
-                                   &dummy,                        /* R8 */
-                                   &dummy,                        /* R9 */
-                                   &dummy,                        /* R10 */
-                                   &dummy,                        /* R11 */
-                                   &dummy);                       /* R12 */
-       *lkey = (u32) lkey_out;
-
+       u64 hret;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+
+       hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
+                                outs,
+                                adapter_handle,                   /* R4 */
+                                5,                                /* R5 */
+                                vaddr,                            /* R6 */
+                                length,                           /* R7 */
+                                (((u64) access_ctrl) << 32ULL),   /* R8 */
+                                pd,                               /* R9 */
+                                0, 0, 0);                         /* R10-R12 */
+
+       *mr_handle = outs[0];
+       *lkey = (u32)outs[2];
        return hret;
 }
 
@@ -570,23 +518,14 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
 
 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
 {
-       u64 hret, dummy, cb_logaddr;
+       u64 hret, cb_logaddr;
 
        cb_logaddr = virt_to_abs(cb_addr);
 
-       hret = ehea_hcall_9arg_9ret(H_QUERY_HEA,
-                                   adapter_handle,             /* R4 */
-                                   cb_logaddr,                 /* R5 */
-                                   0, 0, 0, 0, 0, 0, 0,        /* R6-R12 */
-                                   &dummy,                     /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &dummy,                     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
+       hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
+                                      adapter_handle,          /* R4 */
+                                      cb_logaddr,              /* R5 */
+                                      0, 0, 0, 0, 0);          /* R6-R10 */
 #ifdef DEBUG
        ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
 #endif
@@ -597,36 +536,28 @@ u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
                           const u8 cb_cat, const u64 select_mask,
                           void *cb_addr)
 {
-       u64 port_info, dummy;
+       u64 port_info;
        u64 cb_logaddr = virt_to_abs(cb_addr);
        u64 arr_index = 0;
 
        port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
                  | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
 
-       return ehea_hcall_9arg_9ret(H_QUERY_HEA_PORT,
-                                   adapter_handle,             /* R4 */
-                                   port_info,                  /* R5 */
-                                   select_mask,                /* R6 */
-                                   arr_index,                  /* R7 */
-                                   cb_logaddr,                 /* R8 */
-                                   0, 0, 0, 0,                 /* R9-R12 */
-                                   &dummy,                     /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &dummy,                     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
+       return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
+                                      adapter_handle,          /* R4 */
+                                      port_info,               /* R5 */
+                                      select_mask,             /* R6 */
+                                      arr_index,               /* R7 */
+                                      cb_logaddr,              /* R8 */
+                                      0, 0);                   /* R9-R10 */
 }
 
 u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
                            const u8 cb_cat, const u64 select_mask,
                            void *cb_addr)
 {
-       u64 port_info, dummy, inv_attr_ident, proc_mask;
+       u64 outs[PLPAR_HCALL9_BUFSIZE];
+       u64 port_info;
        u64 arr_index = 0;
        u64 cb_logaddr = virt_to_abs(cb_addr);
 
@@ -635,29 +566,21 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
 #ifdef DEBUG
        ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
 #endif
-       return ehea_hcall_9arg_9ret(H_MODIFY_HEA_PORT,
-                                   adapter_handle,             /* R4 */
-                                   port_info,                  /* R5 */
-                                   select_mask,                /* R6 */
-                                   arr_index,                  /* R7 */
-                                   cb_logaddr,                 /* R8 */
-                                   0, 0, 0, 0,                 /* R9-R12 */
-                                   &inv_attr_ident,            /* R4 */
-                                   &proc_mask,                 /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &dummy,                     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
+       return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
+                                outs,
+                                adapter_handle,                /* R4 */
+                                port_info,                     /* R5 */
+                                select_mask,                   /* R6 */
+                                arr_index,                     /* R7 */
+                                cb_logaddr,                    /* R8 */
+                                0, 0, 0, 0);                   /* R9-R12 */
 }
 
 u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
                          const u8 reg_type, const u64 mc_mac_addr,
                          const u16 vlan_id, const u32 hcall_id)
 {
-       u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id, dummy;
+       u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
        u64 mac_addr = mc_mac_addr >> 16;
 
        r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
@@ -665,41 +588,21 @@ u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
        r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
        r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
 
-       return ehea_hcall_9arg_9ret(hcall_id,
-                                   adapter_handle,             /* R4 */
-                                   r5_port_num,                /* R5 */
-                                   r6_reg_type,                /* R6 */
-                                   r7_mc_mac_addr,             /* R7 */
-                                   r8_vlan_id,                 /* R8 */
-                                   0, 0, 0, 0,                 /* R9-R12 */
-                                   &dummy,                     /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &dummy,                     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
+       return ehea_plpar_hcall_norets(hcall_id,
+                                      adapter_handle,          /* R4 */
+                                      r5_port_num,             /* R5 */
+                                      r6_reg_type,             /* R6 */
+                                      r7_mc_mac_addr,          /* R7 */
+                                      r8_vlan_id,              /* R8 */
+                                      0, 0);                   /* R9-R10 */
 }
 
 u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
                        const u64 event_mask)
 {
-       u64 dummy;
-
-       return ehea_hcall_9arg_9ret(H_RESET_EVENTS,
-                                   adapter_handle,             /* R4 */
-                                   neq_handle,                 /* R5 */
-                                   event_mask,                 /* R6 */
-                                   0, 0, 0, 0, 0, 0,           /* R7-R12 */
-                                   &dummy,                     /* R4 */
-                                   &dummy,                     /* R5 */
-                                   &dummy,                     /* R6 */
-                                   &dummy,                     /* R7 */
-                                   &dummy,                     /* R8 */
-                                   &dummy,                     /* R9 */
-                                   &dummy,                     /* R10 */
-                                   &dummy,                     /* R11 */
-                                   &dummy);                    /* R12 */
+       return ehea_plpar_hcall_norets(H_RESET_EVENTS,
+                                      adapter_handle,          /* R4 */
+                                      neq_handle,              /* R5 */
+                                      event_mask,              /* R6 */
+                                      0, 0, 0, 0);             /* R7-R10 */
 }
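
All of the hunks in this file follow one rule: hcalls whose return registers are ignored drop down to ehea_plpar_hcall_norets(), while hcalls that need outputs pass a PLPAR_HCALL9_BUFSIZE array to ehea_plpar_hcall9() and pick R4..R12 out of outs[0..8]. A minimal sketch of both shapes (fragment only; inv_attr_ident/proc_mask stand in for the R4/R5 outputs the removed wrapper used to capture):

	u64 outs[PLPAR_HCALL9_BUFSIZE];
	u64 hret, inv_attr_ident, proc_mask;

	/* no outputs wanted: return registers are discarded */
	hret = ehea_plpar_hcall_norets(H_RESET_EVENTS,
				       adapter_handle,	/* R4 */
				       neq_handle,	/* R5 */
				       event_mask,	/* R6 */
				       0, 0, 0, 0);	/* R7-R10 */

	/* outputs wanted: R4..R12 come back in outs[0]..outs[8] */
	hret = ehea_plpar_hcall9(H_MODIFY_HEA_PORT, outs,
				 adapter_handle, port_info, select_mask,
				 arr_index, cb_logaddr, 0, 0, 0, 0);
	inv_attr_ident = outs[0];	/* R4 */
	proc_mask      = outs[1];	/* R5 */
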
index 8cc3c331aca84ff7aabcd5fec8a0deeb68e2a42e..b7b8bc2a6307ef7215df4d4390ff4c53f8fc9934 100644 (file)
@@ -162,9 +162,9 @@ static char *version =
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
 #include <linux/jiffies.h>
+#include <linux/io.h>
 
 #include <asm/system.h>
-#include <asm/io.h>
 #include <asm/dma.h>
 
 
index 99b7a411db282e54e604d8ec58cab7ef24943ac4..c5ed635bce36dc754416cb2f53827f82e5aabd3f 100644 (file)
@@ -2497,6 +2497,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
        u8 __iomem *base = get_hwbase(dev);
        u32 events;
        int i;
+       unsigned long flags;
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
 
@@ -2508,16 +2509,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
                if (!(events & np->irqmask))
                        break;
 
-               spin_lock_irq(&np->lock);
+               spin_lock_irqsave(&np->lock, flags);
                nv_tx_done(dev);
-               spin_unlock_irq(&np->lock);
+               spin_unlock_irqrestore(&np->lock, flags);
 
                if (events & (NVREG_IRQ_TX_ERR)) {
                        dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
                                                dev->name, events);
                }
                if (i > max_interrupt_work) {
-                       spin_lock_irq(&np->lock);
+                       spin_lock_irqsave(&np->lock, flags);
                        /* disable interrupts on the nic */
                        writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
                        pci_push(base);
@@ -2527,7 +2528,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
                        }
                        printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-                       spin_unlock_irq(&np->lock);
+                       spin_unlock_irqrestore(&np->lock, flags);
                        break;
                }
 
@@ -2601,6 +2602,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
        u8 __iomem *base = get_hwbase(dev);
        u32 events;
        int i;
+       unsigned long flags;
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
 
@@ -2614,14 +2616,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 
                nv_rx_process(dev, dev->weight);
                if (nv_alloc_rx(dev)) {
-                       spin_lock_irq(&np->lock);
+                       spin_lock_irqsave(&np->lock, flags);
                        if (!np->in_shutdown)
                                mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-                       spin_unlock_irq(&np->lock);
+                       spin_unlock_irqrestore(&np->lock, flags);
                }
 
                if (i > max_interrupt_work) {
-                       spin_lock_irq(&np->lock);
+                       spin_lock_irqsave(&np->lock, flags);
                        /* disable interrupts on the nic */
                        writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
                        pci_push(base);
@@ -2631,7 +2633,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
                        }
                        printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-                       spin_unlock_irq(&np->lock);
+                       spin_unlock_irqrestore(&np->lock, flags);
                        break;
                }
        }
@@ -2648,6 +2650,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
        u8 __iomem *base = get_hwbase(dev);
        u32 events;
        int i;
+       unsigned long flags;
 
        dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
 
@@ -2660,14 +2663,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
                        break;
 
                if (events & NVREG_IRQ_LINK) {
-                       spin_lock_irq(&np->lock);
+                       spin_lock_irqsave(&np->lock, flags);
                        nv_link_irq(dev);
-                       spin_unlock_irq(&np->lock);
+                       spin_unlock_irqrestore(&np->lock, flags);
                }
                if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-                       spin_lock_irq(&np->lock);
+                       spin_lock_irqsave(&np->lock, flags);
                        nv_linkchange(dev);
-                       spin_unlock_irq(&np->lock);
+                       spin_unlock_irqrestore(&np->lock, flags);
                        np->link_timeout = jiffies + LINK_TIMEOUT;
                }
                if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2675,7 +2678,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
                                                dev->name, events);
                }
                if (i > max_interrupt_work) {
-                       spin_lock_irq(&np->lock);
+                       spin_lock_irqsave(&np->lock, flags);
                        /* disable interrupts on the nic */
                        writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
                        pci_push(base);
@@ -2685,7 +2688,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
                                mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
                        }
                        printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-                       spin_unlock_irq(&np->lock);
+                       spin_unlock_irqrestore(&np->lock, flags);
                        break;
                }
 
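
The conversion above from spin_lock_irq() to spin_lock_irqsave() is the classic fix for handlers that can be entered with interrupts already disabled: spin_unlock_irq() re-enables interrupts unconditionally, while the irqsave/irqrestore pair restores whatever state the caller had. A minimal sketch of the pattern (example_priv and example_irq are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct example_priv {
		spinlock_t lock;	/* protects shared device state */
	};

	static irqreturn_t example_irq(int irq, void *data)
	{
		struct example_priv *np = data;
		unsigned long flags;

		spin_lock_irqsave(&np->lock, flags);	/* saves IRQ state in flags */
		/* ... touch state shared with other vectors and timers ... */
		spin_unlock_irqrestore(&np->lock, flags); /* restores, never force-enables */

		return IRQ_HANDLED;
	}
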
index 4bac3cd8f235f0ff70107ced255826785e8bde49..2802db23d3cb4be01239f580f91eb54c3b12010a 100644 (file)
@@ -213,6 +213,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                }
 
-               free_index = pool->consumer_index++ % pool->size;
+               free_index = pool->consumer_index;
+               pool->consumer_index = (pool->consumer_index + 1) % pool->size;
                index = pool->free_map[free_index];
 
                ibmveth_assert(index != IBM_VETH_INVALID_MAP);
@@ -238,7 +239,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                if(lpar_rc != H_SUCCESS) {
                        pool->free_map[free_index] = index;
                        pool->skbuff[index] = NULL;
-                       pool->consumer_index--;
+                       if (pool->consumer_index == 0)
+                               pool->consumer_index = pool->size - 1;
+                       else
+                               pool->consumer_index--;
                        dma_unmap_single(&adapter->vdev->dev,
                                        pool->dma_addr[index], pool->buff_size,
                                        DMA_FROM_DEVICE);
@@ -326,6 +330,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
                         DMA_FROM_DEVICE);
 
-       free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
+       free_index = adapter->rx_buff_pool[pool].producer_index;
+       adapter->rx_buff_pool[pool].producer_index =
+               (adapter->rx_buff_pool[pool].producer_index + 1) % adapter->rx_buff_pool[pool].size;
        adapter->rx_buff_pool[pool].free_map[free_index] = index;
 
        mb();
@@ -437,6 +442,31 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
                                                 &adapter->rx_buff_pool[i]);
 }
 
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+        union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+       int rc, try_again = 1;
+
+       /* After a kexec the adapter will still be open, so our attempt to
+        * open it will fail. So if we get a failure we free the adapter and
+        * try again, but only once. */
+retry:
+       rc = h_register_logical_lan(adapter->vdev->unit_address,
+                                   adapter->buffer_list_dma, rxq_desc.desc,
+                                   adapter->filter_list_dma, mac_address);
+
+       if (rc != H_SUCCESS && try_again) {
+               do {
+                       rc = h_free_logical_lan(adapter->vdev->unit_address);
+               } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+               try_again = 0;
+               goto retry;
+       }
+
+       return rc;
+}
+
 static int ibmveth_open(struct net_device *netdev)
 {
        struct ibmveth_adapter *adapter = netdev->priv;
@@ -502,12 +532,9 @@ static int ibmveth_open(struct net_device *netdev)
        ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
        ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
 
+       h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
-       lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
-                                        adapter->buffer_list_dma,
-                                        rxq_desc.desc,
-                                        adapter->filter_list_dma,
-                                        mac_address);
+       lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 
        if(lpar_rc != H_SUCCESS) {
                ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
@@ -905,6 +932,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
        return -EINVAL;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+       ibmveth_replenish_task(dev->priv);
+       ibmveth_interrupt(dev->irq, dev);
+}
+#endif
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
        int rc, i;
@@ -977,6 +1012,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
        netdev->ethtool_ops           = &netdev_ethtool_ops;
        netdev->change_mtu         = ibmveth_change_mtu;
        SET_NETDEV_DEV(netdev, &dev->dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       netdev->poll_controller = ibmveth_poll_controller;
+#endif
        netdev->features |= NETIF_F_LLTX;
        spin_lock_init(&adapter->stats_lock);
 
@@ -1132,7 +1170,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 {
        struct proc_dir_entry *entry;
        if (ibmveth_proc_dir) {
-               entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
+               char u_addr[10];
+               sprintf(u_addr, "%x", adapter->vdev->unit_address);
+               entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
                if (!entry) {
                        ibmveth_error_printk("Cannot create adapter proc entry");
                } else {
@@ -1147,7 +1187,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
 {
        if (ibmveth_proc_dir) {
-               remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
+               char u_addr[10];
+               sprintf(u_addr, "%x", adapter->vdev->unit_address);
+               remove_proc_entry(u_addr, ibmveth_proc_dir);
        }
 }
 
index 2ffa3a59e704785ca36f8deed2a0238a1a236705..9997081c6daea8458fd1b1fad296733f55885dcd 100644 (file)
@@ -2155,7 +2155,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
        for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
                        offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
                        offset += 4)
-               *(u32 *)((char *)p + offset) = read_mib(mp, offset);
+               *(u32 *)((char *)p + offset) += read_mib(mp, offset);
 
        p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
        p->good_octets_sent +=
@@ -2164,7 +2164,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
        for (offset = ETH_MIB_GOOD_FRAMES_SENT;
                        offset <= ETH_MIB_LATE_COLLISION;
                        offset += 4)
-               *(u32 *)((char *)p + offset) = read_mib(mp, offset);
+               *(u32 *)((char *)p + offset) += read_mib(mp, offset);
 }
 
 /*
index a4a58e4e93a1e29e9de32e66ff6f166e83872669..e7e414928f89459dab22c753be80eb7a477590b4 100644 (file)
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME               "skge"
-#define DRV_VERSION            "1.8"
+#define DRV_VERSION            "1.9"
 #define PFX                    DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE   128
@@ -197,8 +197,8 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
                else if (hw->chip_id == CHIP_ID_YUKON)
                        supported &= ~SUPPORTED_1000baseT_Half;
        } else
-               supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
-                       | SUPPORTED_Autoneg;
+               supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
+                       | SUPPORTED_FIBRE | SUPPORTED_Autoneg;
 
        return supported;
 }
@@ -487,31 +487,37 @@ static void skge_get_pauseparam(struct net_device *dev,
 {
        struct skge_port *skge = netdev_priv(dev);
 
-       ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
-               || (skge->flow_control == FLOW_MODE_SYMMETRIC);
-       ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
-               || (skge->flow_control == FLOW_MODE_SYMMETRIC);
+       ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC)
+               || (skge->flow_control == FLOW_MODE_SYM_OR_REM);
+       ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND);
 
-       ecmd->autoneg = skge->autoneg;
+       ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
 }
 
 static int skge_set_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *ecmd)
 {
        struct skge_port *skge = netdev_priv(dev);
+       struct ethtool_pauseparam old;
 
-       skge->autoneg = ecmd->autoneg;
-       if (ecmd->rx_pause && ecmd->tx_pause)
-               skge->flow_control = FLOW_MODE_SYMMETRIC;
-       else if (ecmd->rx_pause && !ecmd->tx_pause)
-               skge->flow_control = FLOW_MODE_REM_SEND;
-       else if (!ecmd->rx_pause && ecmd->tx_pause)
-               skge->flow_control = FLOW_MODE_LOC_SEND;
-       else
-               skge->flow_control = FLOW_MODE_NONE;
+       skge_get_pauseparam(dev, &old);
+
+       if (ecmd->autoneg != old.autoneg)
+               skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
+       else {
+               if (ecmd->rx_pause && ecmd->tx_pause)
+                       skge->flow_control = FLOW_MODE_SYMMETRIC;
+               else if (ecmd->rx_pause && !ecmd->tx_pause)
+                       skge->flow_control = FLOW_MODE_SYM_OR_REM;
+               else if (!ecmd->rx_pause && ecmd->tx_pause)
+                       skge->flow_control = FLOW_MODE_LOC_SEND;
+               else
+                       skge->flow_control = FLOW_MODE_NONE;
+       }
 
        if (netif_running(dev))
                skge_phy_reset(skge);
+
        return 0;
 }
 
@@ -854,6 +860,23 @@ static int skge_rx_fill(struct net_device *dev)
        return 0;
 }
 
+static const char *skge_pause(enum pause_status status)
+{
+       switch (status) {
+       case FLOW_STAT_NONE:
+               return "none";
+       case FLOW_STAT_REM_SEND:
+               return "rx only";
+       case FLOW_STAT_LOC_SEND:
+               return "tx only";
+       case FLOW_STAT_SYMMETRIC:               /* Both stations may send PAUSE */
+               return "both";
+       default:
+               return "indeterminate";
+       }
+}
+
+
 static void skge_link_up(struct skge_port *skge)
 {
        skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
@@ -862,16 +885,13 @@ static void skge_link_up(struct skge_port *skge)
        netif_carrier_on(skge->netdev);
        netif_wake_queue(skge->netdev);
 
-       if (netif_msg_link(skge))
+       if (netif_msg_link(skge)) {
                printk(KERN_INFO PFX
                       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
                       skge->netdev->name, skge->speed,
                       skge->duplex == DUPLEX_FULL ? "full" : "half",
-                      (skge->flow_control == FLOW_MODE_NONE) ? "none" :
-                      (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
-                      (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
-                      (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
-                      "unknown");
+                      skge_pause(skge->flow_status));
+       }
 }
 
 static void skge_link_down(struct skge_port *skge)
@@ -884,6 +904,29 @@ static void skge_link_down(struct skge_port *skge)
                printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
 }
 
+
+static void xm_link_down(struct skge_hw *hw, int port)
+{
+       struct net_device *dev = hw->dev[port];
+       struct skge_port *skge = netdev_priv(dev);
+       u16 cmd, msk;
+
+       if (hw->phy_type == SK_PHY_XMAC) {
+               msk = xm_read16(hw, port, XM_IMSK);
+               msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND;
+               xm_write16(hw, port, XM_IMSK, msk);
+       }
+
+       cmd = xm_read16(hw, port, XM_MMU_CMD);
+       cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+       xm_write16(hw, port, XM_MMU_CMD, cmd);
+       /* dummy read to ensure writing */
+       (void) xm_read16(hw, port, XM_MMU_CMD);
+
+       if (netif_carrier_ok(dev))
+               skge_link_down(skge);
+}
+
 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
 {
        int i;
@@ -992,7 +1035,15 @@ static const u16 phy_pause_map[] = {
        [FLOW_MODE_NONE] =      0,
        [FLOW_MODE_LOC_SEND] =  PHY_AN_PAUSE_ASYM,
        [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
-       [FLOW_MODE_REM_SEND]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+       [FLOW_MODE_SYM_OR_REM]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+/* special defines for FIBER (88E1011S only) */
+static const u16 fiber_pause_map[] = {
+       [FLOW_MODE_NONE]        = PHY_X_P_NO_PAUSE,
+       [FLOW_MODE_LOC_SEND]    = PHY_X_P_ASYM_MD,
+       [FLOW_MODE_SYMMETRIC]   = PHY_X_P_SYM_MD,
+       [FLOW_MODE_SYM_OR_REM]  = PHY_X_P_BOTH_MD,
 };
 
 
@@ -1008,14 +1059,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
        status = xm_phy_read(hw, port, PHY_BCOM_STAT);
 
        if ((status & PHY_ST_LSYNC) == 0) {
-               u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
-               cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-               xm_write16(hw, port, XM_MMU_CMD, cmd);
-               /* dummy read to ensure writing */
-               (void) xm_read16(hw, port, XM_MMU_CMD);
-
-               if (netif_carrier_ok(dev))
-                       skge_link_down(skge);
+               xm_link_down(hw, port);
                return;
        }
 
@@ -1048,20 +1092,19 @@ static void bcom_check_link(struct skge_hw *hw, int port)
                        return;
                }
 
-
                /* We are using IEEE 802.3z/D5.0 Table 37-4 */
                switch (aux & PHY_B_AS_PAUSE_MSK) {
                case PHY_B_AS_PAUSE_MSK:
-                       skge->flow_control = FLOW_MODE_SYMMETRIC;
+                       skge->flow_status = FLOW_STAT_SYMMETRIC;
                        break;
                case PHY_B_AS_PRR:
-                       skge->flow_control = FLOW_MODE_REM_SEND;
+                       skge->flow_status = FLOW_STAT_REM_SEND;
                        break;
                case PHY_B_AS_PRT:
-                       skge->flow_control = FLOW_MODE_LOC_SEND;
+                       skge->flow_status = FLOW_STAT_LOC_SEND;
                        break;
                default:
-                       skge->flow_control = FLOW_MODE_NONE;
+                       skge->flow_status = FLOW_STAT_NONE;
                }
                skge->speed = SPEED_1000;
        }
@@ -1191,17 +1234,7 @@ static void xm_phy_init(struct skge_port *skge)
                if (skge->advertising & ADVERTISED_1000baseT_Full)
                        ctrl |= PHY_X_AN_FD;
 
-               switch(skge->flow_control) {
-               case FLOW_MODE_NONE:
-                       ctrl |= PHY_X_P_NO_PAUSE;
-                       break;
-               case FLOW_MODE_LOC_SEND:
-                       ctrl |= PHY_X_P_ASYM_MD;
-                       break;
-               case FLOW_MODE_SYMMETRIC:
-                       ctrl |= PHY_X_P_BOTH_MD;
-                       break;
-               }
+               ctrl |= fiber_pause_map[skge->flow_control];
 
                xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
 
@@ -1235,14 +1268,7 @@ static void xm_check_link(struct net_device *dev)
        status = xm_phy_read(hw, port, PHY_XMAC_STAT);
 
        if ((status & PHY_ST_LSYNC) == 0) {
-               u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
-               cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-               xm_write16(hw, port, XM_MMU_CMD, cmd);
-               /* dummy read to ensure writing */
-               (void) xm_read16(hw, port, XM_MMU_CMD);
-
-               if (netif_carrier_ok(dev))
-                       skge_link_down(skge);
+               xm_link_down(hw, port);
                return;
        }
 
@@ -1276,15 +1302,20 @@ static void xm_check_link(struct net_device *dev)
                }
 
                /* We are using IEEE 802.3z/D5.0 Table 37-4 */
-               if (lpa & PHY_X_P_SYM_MD)
-                       skge->flow_control = FLOW_MODE_SYMMETRIC;
-               else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
-                       skge->flow_control = FLOW_MODE_REM_SEND;
-               else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
-                       skge->flow_control = FLOW_MODE_LOC_SEND;
+               if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
+                    skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
+                   (lpa & PHY_X_P_SYM_MD))
+                       skge->flow_status = FLOW_STAT_SYMMETRIC;
+               else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
+                        (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
+                       /* Enable PAUSE receive, disable PAUSE transmit */
+                       skge->flow_status  = FLOW_STAT_REM_SEND;
+               else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
+                        (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
+                       /* Disable PAUSE receive, enable PAUSE transmit */
+                       skge->flow_status = FLOW_STAT_LOC_SEND;
                else
-                       skge->flow_control = FLOW_MODE_NONE;
-
+                       skge->flow_status = FLOW_STAT_NONE;
 
                skge->speed = SPEED_1000;
        }
@@ -1568,6 +1599,10 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
                printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
                       skge->netdev->name, status);
 
+       if (hw->phy_type == SK_PHY_XMAC &&
+           (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC)))
+               xm_link_down(hw, port);
+
        if (status & XM_IS_TXF_UR) {
                xm_write32(hw, port, XM_MODE, XM_MD_FTF);
                ++skge->net_stats.tx_fifo_errors;
@@ -1582,7 +1617,7 @@ static void genesis_link_up(struct skge_port *skge)
 {
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
-       u16 cmd;
+       u16 cmd, msk;
        u32 mode;
 
        cmd = xm_read16(hw, port, XM_MMU_CMD);
@@ -1591,8 +1626,8 @@ static void genesis_link_up(struct skge_port *skge)
         * enabling pause frame reception is required for 1000BT
         * because the XMAC is not reset if the link is going down
         */
-       if (skge->flow_control == FLOW_MODE_NONE ||
-           skge->flow_control == FLOW_MODE_LOC_SEND)
+       if (skge->flow_status == FLOW_STAT_NONE ||
+           skge->flow_status == FLOW_STAT_LOC_SEND)
                /* Disable Pause Frame Reception */
                cmd |= XM_MMU_IGN_PF;
        else
@@ -1602,8 +1637,8 @@ static void genesis_link_up(struct skge_port *skge)
        xm_write16(hw, port, XM_MMU_CMD, cmd);
 
        mode = xm_read32(hw, port, XM_MODE);
-       if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
-           skge->flow_control == FLOW_MODE_LOC_SEND) {
+       if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
+           skge->flow_status == FLOW_STAT_LOC_SEND) {
                /*
                 * Configure Pause Frame Generation
                 * Use internal and external Pause Frame Generation.
@@ -1631,7 +1666,11 @@ static void genesis_link_up(struct skge_port *skge)
        }
 
        xm_write32(hw, port, XM_MODE, mode);
-       xm_write16(hw, port, XM_IMSK, XM_DEF_MSK);
+       msk = XM_DEF_MSK;
+       if (hw->phy_type != SK_PHY_XMAC)
+               msk |= XM_IS_INP_ASS;   /* disable GP0 interrupt bit */
+
+       xm_write16(hw, port, XM_IMSK, msk);
        xm_read16(hw, port, XM_ISRC);
 
        /* get MMU Command Reg. */
@@ -1779,11 +1818,17 @@ static void yukon_init(struct skge_hw *hw, int port)
                                adv |= PHY_M_AN_10_FD;
                        if (skge->advertising & ADVERTISED_10baseT_Half)
                                adv |= PHY_M_AN_10_HD;
-               } else  /* special defines for FIBER (88E1011S only) */
-                       adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
 
-               /* Set Flow-control capabilities */
-               adv |= phy_pause_map[skge->flow_control];
+                       /* Set Flow-control capabilities */
+                       adv |= phy_pause_map[skge->flow_control];
+               } else {
+                       if (skge->advertising & ADVERTISED_1000baseT_Full)
+                               adv |= PHY_M_AN_1000X_AFD;
+                       if (skge->advertising & ADVERTISED_1000baseT_Half)
+                               adv |= PHY_M_AN_1000X_AHD;
+
+                       adv |= fiber_pause_map[skge->flow_control];
+               }
 
                /* Restart Auto-negotiation */
                ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
@@ -1917,6 +1962,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
        case FLOW_MODE_LOC_SEND:
                /* disable Rx flow-control */
                reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+               break;
+       case FLOW_MODE_SYMMETRIC:
+       case FLOW_MODE_SYM_OR_REM:
+               /* enable Tx & Rx flow-control */
+               break;
        }
 
        gma_write16(hw, port, GM_GP_CTRL, reg);
@@ -2111,13 +2161,11 @@ static void yukon_link_down(struct skge_port *skge)
        ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
        gma_write16(hw, port, GM_GP_CTRL, ctrl);
 
-       if (skge->flow_control == FLOW_MODE_REM_SEND) {
+       if (skge->flow_status == FLOW_STAT_REM_SEND) {
+               ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+               ctrl |= PHY_M_AN_ASP;
                /* restore Asymmetric Pause bit */
-               gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
-                                 gm_phy_read(hw, port,
-                                                  PHY_MARV_AUNE_ADV)
-                                 | PHY_M_AN_ASP);
-
+               gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
        }
 
        yukon_reset(hw, port);
@@ -2164,19 +2212,19 @@ static void yukon_phy_intr(struct skge_port *skge)
                /* We are using IEEE 802.3z/D5.0 Table 37-4 */
                switch (phystat & PHY_M_PS_PAUSE_MSK) {
                case PHY_M_PS_PAUSE_MSK:
-                       skge->flow_control = FLOW_MODE_SYMMETRIC;
+                       skge->flow_status = FLOW_STAT_SYMMETRIC;
                        break;
                case PHY_M_PS_RX_P_EN:
-                       skge->flow_control = FLOW_MODE_REM_SEND;
+                       skge->flow_status = FLOW_STAT_REM_SEND;
                        break;
                case PHY_M_PS_TX_P_EN:
-                       skge->flow_control = FLOW_MODE_LOC_SEND;
+                       skge->flow_status = FLOW_STAT_LOC_SEND;
                        break;
                default:
-                       skge->flow_control = FLOW_MODE_NONE;
+                       skge->flow_status = FLOW_STAT_NONE;
                }
 
-               if (skge->flow_control == FLOW_MODE_NONE ||
+               if (skge->flow_status == FLOW_STAT_NONE ||
                    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
                        skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
                else
@@ -3399,7 +3447,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
        /* Auto speed and flow control */
        skge->autoneg = AUTONEG_ENABLE;
-       skge->flow_control = FLOW_MODE_SYMMETRIC;
+       skge->flow_control = FLOW_MODE_SYM_OR_REM;
        skge->duplex = -1;
        skge->speed = -1;
        skge->advertising = skge_supported_modes(hw);
index d0b47d46cf9d23701239d7b5c6d5c7d32c477c3d..537c0aaa1db8de7b327572657fea85003548afda 100644 (file)
@@ -2195,7 +2195,8 @@ enum {
        XM_IS_RX_COMP   = 1<<0, /* Bit  0:      Frame Rx Complete */
 };
 
-#define XM_DEF_MSK     (~(XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_RXF_OV | XM_IS_TXF_UR))
+#define XM_DEF_MSK     (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | \
+                          XM_IS_RXF_OV | XM_IS_TXF_UR))
 
 
 /*     XM_HW_CFG       16 bit r/w      Hardware Config Register */
@@ -2426,13 +2427,24 @@ struct skge_hw {
        struct mutex         phy_mutex;
 };
 
-enum {
-       FLOW_MODE_NONE          = 0, /* No Flow-Control */
-       FLOW_MODE_LOC_SEND      = 1, /* Local station sends PAUSE */
-       FLOW_MODE_REM_SEND      = 2, /* Symmetric or just remote */
+enum pause_control {
+       FLOW_MODE_NONE          = 1, /* No Flow-Control */
+       FLOW_MODE_LOC_SEND      = 2, /* Local station sends PAUSE */
        FLOW_MODE_SYMMETRIC     = 3, /* Both stations may send PAUSE */
+       FLOW_MODE_SYM_OR_REM    = 4, /* Both stations may send PAUSE or
+                                     * just the remote station may send PAUSE
+                                     */
+};
+
+enum pause_status {
+       FLOW_STAT_INDETERMINATED = 0,   /* indeterminate */
+       FLOW_STAT_NONE,                 /* No Flow Control */
+       FLOW_STAT_REM_SEND,             /* Remote Station sends PAUSE */
+       FLOW_STAT_LOC_SEND,             /* Local station sends PAUSE */
+       FLOW_STAT_SYMMETRIC,            /* Both stations may send PAUSE */
 };
 
+
 struct skge_port {
        u32                  msg_enable;
        struct skge_hw       *hw;
@@ -2445,9 +2457,10 @@ struct skge_port {
        struct net_device_stats net_stats;
 
        struct work_struct   link_thread;
+       enum pause_control   flow_control;
+       enum pause_status    flow_status;
        u8                   rx_csum;
        u8                   blink_on;
-       u8                   flow_control;
        u8                   wol;
        u8                   autoneg;   /* AUTONEG_ENABLE, AUTONEG_DISABLE */
        u8                   duplex;    /* DUPLEX_HALF, DUPLEX_FULL */
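
The header changes above split the old tri-state flow_control byte in two: enum pause_control is the policy requested via ethtool (now defaulting to FLOW_MODE_SYM_OR_REM) and indexes phy_pause_map[]/fiber_pause_map[] when the advertisement is built, while enum pause_status records what the link partner actually negotiated and is what the link-up/link-down paths test. A condensed sketch of the flow, using the names defined above:

	/* policy: what we advertise (ethtool-controlled) */
	skge->flow_control = FLOW_MODE_SYM_OR_REM;
	adv |= fiber_pause_map[skge->flow_control];	/* fiber PHY advert bits */

	/* status: what autonegotiation resolved to (decoded from the PHY) */
	if (skge->flow_status == FLOW_STAT_NONE)
		skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
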
index 459c845d664873e771a63f874489a030ed05602e..c10e7f5faa5f59be42894b51bf6a74e76c812714 100644 (file)
@@ -683,7 +683,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
        sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
        if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
-               sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
+               sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8);
                sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
                if (hw->dev[port]->mtu > ETH_DATA_LEN) {
                        /* set Tx GMAC FIFO Almost Empty Threshold */
@@ -1907,7 +1907,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
                pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
                                               length, PCI_DMA_FROMDEVICE);
                re->skb->ip_summed = CHECKSUM_NONE;
-               __skb_put(skb, length);
+               skb_put(skb, length);
        }
        return skb;
 }
@@ -1970,7 +1970,7 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
        if (skb_shinfo(skb)->nr_frags)
                skb_put_frags(skb, hdr_space, length);
        else
-               skb_put(skb, hdr_space);
+               skb_put(skb, length);
        return skb;
 }
 
@@ -2220,8 +2220,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
                /* PCI-Express uncorrectable Error occurred */
                u32 pex_err;
 
-               pex_err = sky2_pci_read32(hw,
-                                         hw->err_cap + PCI_ERR_UNCOR_STATUS);
+               pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
 
                if (net_ratelimit())
                        printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2229,20 +2228,15 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 
                /* clear the interrupt */
                sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-               sky2_pci_write32(hw,
-                                hw->err_cap + PCI_ERR_UNCOR_STATUS,
-                                0xffffffffUL);
+               sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
+                                      0xffffffffUL);
                sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
-
-               /* In case of fatal error mask off to keep from getting stuck */
-               if (pex_err & (PCI_ERR_UNC_POISON_TLP | PCI_ERR_UNC_FCP
-                              | PCI_ERR_UNC_DLP)) {
+               if (pex_err & PEX_FATAL_ERRORS) {
                        u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
                        hwmsk &= ~Y2_IS_PCI_EXP;
                        sky2_write32(hw, B0_HWE_IMSK, hwmsk);
                }
-
        }
 
        if (status & Y2_HWE_L1_MASK)
@@ -2423,7 +2417,6 @@ static int sky2_reset(struct sky2_hw *hw)
        u16 status;
        u8 t8;
        int i;
-       u32 msk;
 
        sky2_write8(hw, B0_CTST, CS_RST_CLR);
 
@@ -2464,13 +2457,9 @@ static int sky2_reset(struct sky2_hw *hw)
        sky2_write8(hw, B0_CTST, CS_MRST_CLR);
 
        /* clear any PEX errors */
-       if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
-               hw->err_cap = pci_find_ext_capability(hw->pdev, PCI_EXT_CAP_ID_ERR);
-               if (hw->err_cap)
-                       sky2_pci_write32(hw,
-                                        hw->err_cap + PCI_ERR_UNCOR_STATUS,
-                                        0xffffffffUL);
-       }
+       if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+               sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
+
 
        hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
        hw->ports = 1;
@@ -2527,10 +2516,7 @@ static int sky2_reset(struct sky2_hw *hw)
                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
        }
 
-       msk = Y2_HWE_ALL_MASK;
-       if (!hw->err_cap)
-               msk &= ~Y2_IS_PCI_EXP;
-       sky2_write32(hw, B0_HWE_IMSK, msk);
+       sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
 
        for (i = 0; i < hw->ports; i++)
                sky2_gmac_reset(hw, i);
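
Two of the RX hunks above are length-accounting fixes: receive_copy() moves from __skb_put() to skb_put(), picking up the overrun check (skb_put() calls skb_over_panic() if the new tail would pass the end of the allocation; __skb_put() does not), and receive_new() must grow the skb by the received frame length, not by the hdr_space that was mapped for the header. The contract, as a reminder (frame is a hypothetical source buffer):

	/* extend the data area by len bytes; returns the old tail pointer */
	unsigned char *dst = skb_put(skb, len);
	memcpy(dst, frame, len);	/* hypothetical copy into the new area */
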
index f66109a96d95b0b2f13dfeba2c02f13241507a8d..43d2accf60e12ccdbfaf57c4e166c30492936dd3 100644 (file)
@@ -6,15 +6,24 @@
 
 #define ETH_JUMBO_MTU          9000    /* Maximum MTU supported */
 
-/* PCI device specific config registers */
+/* PCI config registers */
 enum {
        PCI_DEV_REG1    = 0x40,
        PCI_DEV_REG2    = 0x44,
+       PCI_DEV_STATUS  = 0x7c,
        PCI_DEV_REG3    = 0x80,
        PCI_DEV_REG4    = 0x84,
        PCI_DEV_REG5    = 0x88,
 };
 
+enum {
+       PEX_DEV_CAP     = 0xe4,
+       PEX_DEV_CTRL    = 0xe8,
+       PEX_DEV_STA     = 0xea,
+       PEX_LNK_STAT    = 0xf2,
+       PEX_UNC_ERR_STAT = 0x104,
+};
+
 /* Yukon-2 */
 enum pci_dev_reg_1 {
        PCI_Y2_PIG_ENA   = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
@@ -63,6 +72,39 @@ enum pci_dev_reg_4 {
                               PCI_STATUS_REC_MASTER_ABORT | \
                               PCI_STATUS_REC_TARGET_ABORT | \
                               PCI_STATUS_PARITY)
+
+enum pex_dev_ctrl {
+       PEX_DC_MAX_RRS_MSK      = 7<<12, /* Bit 14..12: Max. Read Request Size */
+       PEX_DC_EN_NO_SNOOP      = 1<<11,/* Enable No Snoop */
+       PEX_DC_EN_AUX_POW       = 1<<10,/* Enable AUX Power */
+       PEX_DC_EN_PHANTOM       = 1<<9, /* Enable Phantom Functions */
+       PEX_DC_EN_EXT_TAG       = 1<<8, /* Enable Extended Tag Field */
+       PEX_DC_MAX_PLS_MSK      = 7<<5, /* Bit  7.. 5:  Max. Payload Size Mask */
+       PEX_DC_EN_REL_ORD       = 1<<4, /* Enable Relaxed Ordering */
+       PEX_DC_EN_UNS_RQ_RP     = 1<<3, /* Enable Unsupported Request Reporting */
+       PEX_DC_EN_FAT_ER_RP     = 1<<2, /* Enable Fatal Error Reporting */
+       PEX_DC_EN_NFA_ER_RP     = 1<<1, /* Enable Non-Fatal Error Reporting */
+       PEX_DC_EN_COR_ER_RP     = 1<<0, /* Enable Correctable Error Reporting */
+};
+#define  PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
+
+/* PEX_UNC_ERR_STAT     PEX Uncorrectable Errors Status Register (Yukon-2) */
+enum pex_err {
+       PEX_UNSUP_REQ   = 1<<20, /* Unsupported Request Error */
+
+       PEX_MALFOR_TLP  = 1<<18, /* Malformed TLP */
+
+       PEX_UNEXP_COMP  = 1<<16, /* Unexpected Completion */
+
+       PEX_COMP_TO     = 1<<14, /* Completion Timeout */
+       PEX_FLOW_CTRL_P = 1<<13, /* Flow Control Protocol Error */
+       PEX_POIS_TLP    = 1<<12, /* Poisoned TLP */
+
+       PEX_DATA_LINK_P = 1<<4, /* Data Link Protocol Error */
+       PEX_FATAL_ERRORS = (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
+};
+
+
 enum csr_regs {
        B0_RAP          = 0x0000,
        B0_CTST         = 0x0004,
@@ -1836,7 +1878,6 @@ struct sky2_hw {
        struct net_device    *dev[2];
 
        int                  pm_cap;
-       int                  err_cap;
        u8                   chip_id;
        u8                   chip_rev;
        u8                   pmd_type;
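
With PEX_UNC_ERR_STAT fixed at its Yukon-2 config offset (0x104), the error path no longer needs to walk the extended-capability list or carry err_cap in struct sky2_hw; clearing is a plain write-1-to-clear, and PEX_FATAL_ERRORS names the bits that justify masking Y2_IS_PCI_EXP. Condensed from the sky2.c hunks above:

	u32 pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);

	sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);	/* write-1-to-clear */
	if (pex_err & PEX_FATAL_ERRORS)		/* malformed TLP, flow-control or
						 * data-link protocol error */
		sky2_write32(hw, B0_HWE_IMSK,
			     sky2_read32(hw, B0_HWE_IMSK) & ~Y2_IS_PCI_EXP);
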
index 636dbfcdf8cb92a81449f5e81080103102ebf981..0c9f1e7dab2e8de01277bef61c3f7b072792ca9b 100644 (file)
@@ -398,6 +398,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
 
 #define SMC_IRQ_FLAGS          (0)
 
+#elif  defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT       1
+#define SMC_CAN_USE_16BIT      1
+#define SMC_CAN_USE_32BIT      1
+#define SMC_NOWAIT             1
+
+#define SMC_inb(a, r)          readb((a) + (r))
+#define SMC_inw(a, r)          readw((a) + (r))
+#define SMC_inl(a, r)          readl((a) + (r))
+#define SMC_outb(v, a, r)      writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)      writew(v, (a) + (r))
+#define SMC_outl(v, a, r)      writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l)   readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l)  writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS          (0)
+
 #else
 
 #define SMC_CAN_USE_8BIT       1
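
The new CONFIG_ARCH_VERSATILE block maps the SMC91x accessor macros straight onto the generic MMIO helpers, so each register access compiles down to a single readb/readw/readl or write. A hypothetical call site, assuming ioaddr is the ioremapped chip base and off a register offset:

	u16 v = SMC_inw(ioaddr, off);	/* expands to readw(ioaddr + off) */
	SMC_outw(v, ioaddr, off);	/* expands to writew(v, ioaddr + off) */
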
index 46a009085f7c097b6d09aef792935e389f82e520..418138dd6c687452afe9636bafb87c0658a69712 100644 (file)
@@ -55,12 +55,13 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
              "<Jens.Osterkamp@de.ibm.com>");
 MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
 MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
 
 static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
 static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
 
-module_param(rx_descriptors, int, 0644);
-module_param(tx_descriptors, int, 0644);
+module_param(rx_descriptors, int, 0444);
+module_param(tx_descriptors, int, 0444);
 
 MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
                 "in rx chains");
@@ -300,7 +301,7 @@ static int
 spider_net_init_chain(struct spider_net_card *card,
                       struct spider_net_descr_chain *chain,
                       struct spider_net_descr *start_descr,
-                      int direction, int no)
+                      int no)
 {
        int i;
        struct spider_net_descr *descr;
@@ -315,7 +316,7 @@ spider_net_init_chain(struct spider_net_card *card,
 
                buf = pci_map_single(card->pdev, descr,
                                     SPIDER_NET_DESCR_SIZE,
-                                    direction);
+                                    PCI_DMA_BIDIRECTIONAL);
 
                if (pci_dma_mapping_error(buf))
                        goto iommu_error;
@@ -329,11 +330,6 @@ spider_net_init_chain(struct spider_net_card *card,
        (descr-1)->next = start_descr;
        start_descr->prev = descr-1;
 
-       descr = start_descr;
-       if (direction == PCI_DMA_FROMDEVICE)
-               for (i=0; i < no; i++, descr++)
-                       descr->next_descr_addr = descr->next->bus_addr;
-
        spin_lock_init(&chain->lock);
        chain->head = start_descr;
        chain->tail = start_descr;
@@ -346,7 +342,7 @@ iommu_error:
                if (descr->bus_addr)
                        pci_unmap_single(card->pdev, descr->bus_addr,
                                         SPIDER_NET_DESCR_SIZE,
-                                        direction);
+                                        PCI_DMA_BIDIRECTIONAL);
        return -ENOMEM;
 }
 
@@ -362,15 +358,15 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
        struct spider_net_descr *descr;
 
        descr = card->rx_chain.head;
-       while (descr->next != card->rx_chain.head) {
+       do {
                if (descr->skb) {
                        dev_kfree_skb(descr->skb);
                        pci_unmap_single(card->pdev, descr->buf_addr,
                                         SPIDER_NET_MAX_FRAME,
-                                        PCI_DMA_FROMDEVICE);
+                                        PCI_DMA_BIDIRECTIONAL);
                }
                descr = descr->next;
-       }
+       } while (descr != card->rx_chain.head);
 }
 
 /**
@@ -645,26 +641,41 @@ static int
 spider_net_prepare_tx_descr(struct spider_net_card *card,
                            struct sk_buff *skb)
 {
-       struct spider_net_descr *descr = card->tx_chain.head;
+       struct spider_net_descr *descr;
        dma_addr_t buf;
+       unsigned long flags;
+       int length;
 
-       buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+       length = skb->len;
+       if (length < ETH_ZLEN) {
+               if (skb_pad(skb, ETH_ZLEN-length))
+                       return 0;
+               length = ETH_ZLEN;
+       }
+
+       buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(buf)) {
                if (netif_msg_tx_err(card) && net_ratelimit())
                        pr_err("could not iommu-map packet (%p, %i). "
-                                 "Dropping packet\n", skb->data, skb->len);
+                                 "Dropping packet\n", skb->data, length);
                card->spider_stats.tx_iommu_map_error++;
                return -ENOMEM;
        }
 
+       spin_lock_irqsave(&card->tx_chain.lock, flags);
+       descr = card->tx_chain.head;
+       card->tx_chain.head = descr->next;
+
        descr->buf_addr = buf;
-       descr->buf_size = skb->len;
+       descr->buf_size = length;
        descr->next_descr_addr = 0;
        descr->skb = skb;
        descr->data_status = 0;
 
        descr->dmac_cmd_status =
                        SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
+       spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+
        if (skb->protocol == htons(ETH_P_IP))
                switch (skb->nh.iph->protocol) {
                case IPPROTO_TCP:
@@ -675,32 +686,51 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
                        break;
                }
 
+       /* Chain the bus address, so that the DMA engine finds this descr. */
        descr->prev->next_descr_addr = descr->bus_addr;
 
+       card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
        return 0;
 }
 
-/**
- * spider_net_release_tx_descr - processes a used tx descriptor
- * @card: card structure
- * @descr: descriptor to release
- *
- * releases a used tx descriptor (unmapping, freeing of skb)
- */
-static inline void
-spider_net_release_tx_descr(struct spider_net_card *card)
+static int
+spider_net_set_low_watermark(struct spider_net_card *card)
 {
+       unsigned long flags;
+       int status;
+       int cnt = 0;
+       int i;
        struct spider_net_descr *descr = card->tx_chain.tail;
-       struct sk_buff *skb;
 
-       card->tx_chain.tail = card->tx_chain.tail->next;
-       descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+       /* Measure the length of the queue. Measurement does not
+        * need to be precise -- does not need a lock. */
+       while (descr != card->tx_chain.head) {
+               status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
+               if (status == SPIDER_NET_DESCR_NOT_IN_USE)
+                       break;
+               descr = descr->next;
+               cnt++;
+       }
 
-       /* unmap the skb */
-       skb = descr->skb;
-       pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
-                       PCI_DMA_TODEVICE);
-       dev_kfree_skb_any(skb);
+       /* If TX queue is short, don't even bother with interrupts */
+       if (cnt < card->num_tx_desc/4)
+               return cnt;
+
+       /* Set the low watermark 3/4 of the way into the queue. */
+       descr = card->tx_chain.tail;
+       cnt = (cnt * 3) / 4;
+       for (i = 0; i < cnt; i++)
+               descr = descr->next;
+
+       /* Set the new watermark, clear the old watermark */
+       spin_lock_irqsave(&card->tx_chain.lock, flags);
+       descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
+       if (card->low_watermark && card->low_watermark != descr)
+               card->low_watermark->dmac_cmd_status =
+                    card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
+       card->low_watermark = descr;
+       spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+       return cnt;
 }
 
 /**
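
spider_net_set_low_watermark() above replaces per-packet TX-complete interrupts with a single armed descriptor: it measures the in-flight queue and, if the queue is long enough, sets SPIDER_NET_DESCR_TXDESFLG on the descriptor roughly 3/4 of the way in (clearing the previous watermark), so the hardware interrupts once when the queue has drained that far. For short queues no descriptor is armed, and the caller restarts the DMA engine itself; sketched from spider_net_xmit() below:

	cnt = spider_net_set_low_watermark(card);  /* re-arm TXDESFLG */
	if (cnt < 5)                               /* short queue, no IRQ armed: */
		spider_net_kick_tx_dma(card);      /* restart the DMA directly */
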
@@ -719,21 +749,29 @@ static int
 spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
 {
        struct spider_net_descr_chain *chain = &card->tx_chain;
+       struct spider_net_descr *descr;
+       struct sk_buff *skb;
+       u32 buf_addr;
+       unsigned long flags;
        int status;
 
-       spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
-
        while (chain->tail != chain->head) {
-               status = spider_net_get_descr_status(chain->tail);
+               spin_lock_irqsave(&chain->lock, flags);
+               descr = chain->tail;
+
+               status = spider_net_get_descr_status(descr);
                switch (status) {
                case SPIDER_NET_DESCR_COMPLETE:
                        card->netdev_stats.tx_packets++;
-                       card->netdev_stats.tx_bytes += chain->tail->skb->len;
+                       card->netdev_stats.tx_bytes += descr->skb->len;
                        break;
 
                case SPIDER_NET_DESCR_CARDOWNED:
-                       if (!brutal)
+                       if (!brutal) {
+                               spin_unlock_irqrestore(&chain->lock, flags);
                                return 1;
+                       }
+
                        /* fallthrough, if we release the descriptors
                         * brutally (then we don't care about
                         * SPIDER_NET_DESCR_CARDOWNED) */
@@ -750,11 +788,25 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
 
                default:
                        card->netdev_stats.tx_dropped++;
-                       return 1;
+                       if (!brutal) {
+                               spin_unlock_irqrestore(&chain->lock, flags);
+                               return 1;
+                       }
                }
-               spider_net_release_tx_descr(card);
-       }
 
+               chain->tail = descr->next;
+               descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
+               skb = descr->skb;
+               buf_addr = descr->buf_addr;
+               spin_unlock_irqrestore(&chain->lock, flags);
+
+               /* unmap the skb */
+               if (skb) {
+                       int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+                       pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE);
+                       dev_kfree_skb(skb);
+               }
+       }
        return 0;
 }
 
@@ -763,8 +815,12 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
  * @card: card structure
  * @descr: descriptor address to enable TX processing at
  *
- * spider_net_kick_tx_dma writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission DMA engine
+ * This routine will start the transmit DMA running if
+ * it is not already running. It need only be
+ * called when queueing a new packet to an empty tx queue.
+ * Writes the current tx chain head as start address
+ * of the tx descriptor chain and enables the transmission
+ * DMA engine.
  */
 static inline void
 spider_net_kick_tx_dma(struct spider_net_card *card)
@@ -804,65 +860,43 @@ out:
 static int
 spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
+       int cnt;
        struct spider_net_card *card = netdev_priv(netdev);
        struct spider_net_descr_chain *chain = &card->tx_chain;
-       struct spider_net_descr *descr = chain->head;
-       unsigned long flags;
-       int result;
-
-       spin_lock_irqsave(&chain->lock, flags);
 
        spider_net_release_tx_chain(card, 0);
 
-       if (chain->head->next == chain->tail->prev) {
-               card->netdev_stats.tx_dropped++;
-               result = NETDEV_TX_LOCKED;
-               goto out;
-       }
+       if ((chain->head->next == chain->tail->prev) ||
+          (spider_net_prepare_tx_descr(card, skb) != 0)) {
 
-       if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
                card->netdev_stats.tx_dropped++;
-               result = NETDEV_TX_LOCKED;
-               goto out;
+               netif_stop_queue(netdev);
+               return NETDEV_TX_BUSY;
        }
 
-       if (spider_net_prepare_tx_descr(card, skb) != 0) {
-               card->netdev_stats.tx_dropped++;
-               result = NETDEV_TX_BUSY;
-               goto out;
-       }
-
-       result = NETDEV_TX_OK;
-
-       spider_net_kick_tx_dma(card);
-       card->tx_chain.head = card->tx_chain.head->next;
-
-out:
-       spin_unlock_irqrestore(&chain->lock, flags);
-       netif_wake_queue(netdev);
-       return result;
+       cnt = spider_net_set_low_watermark(card);
+       if (cnt < 5)
+               spider_net_kick_tx_dma(card);
+       return NETDEV_TX_OK;
 }
 
 /**
  * spider_net_cleanup_tx_ring - cleans up the TX ring
  * @card: card structure
  *
- * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
- * interrupts to cleanup our TX ring) and returns sent packets to the stack
- * by freeing them
+ * spider_net_cleanup_tx_ring is called by either the tx_timer
+ * or from the NAPI polling routine.
+ * This routine releases resources associated with transmitted
+ * packets, including updating the queue tail pointer.
  */
 static void
 spider_net_cleanup_tx_ring(struct spider_net_card *card)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&card->tx_chain.lock, flags);
-
        if ((spider_net_release_tx_chain(card, 0) != 0) &&
-           (card->netdev->flags & IFF_UP))
+           (card->netdev->flags & IFF_UP)) {
                spider_net_kick_tx_dma(card);
-
-       spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+               netif_wake_queue(card->netdev);
+       }
 }
 
 /**
@@ -1053,6 +1087,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
        int packets_to_do, packets_done = 0;
        int no_more_packets = 0;
 
+       spider_net_cleanup_tx_ring(card);
        packets_to_do = min(*budget, netdev->quota);
 
        while (packets_to_do) {
@@ -1243,12 +1278,15 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        case SPIDER_NET_PHYINT:
        case SPIDER_NET_GMAC2INT:
        case SPIDER_NET_GMAC1INT:
-       case SPIDER_NET_GIPSINT:
        case SPIDER_NET_GFIFOINT:
        case SPIDER_NET_DMACINT:
        case SPIDER_NET_GSYSINT:
                break; */
 
+       case SPIDER_NET_GIPSINT:
+               show_error = 0;
+               break;
+
        case SPIDER_NET_GPWOPCMPINT:
                /* PHY write operation completed */
                show_error = 0;
@@ -1307,9 +1345,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        case SPIDER_NET_GDTDCEINT:
                /* chain end. If a descriptor should be sent, kick off
                 * tx dma
-               if (card->tx_chain.tail == card->tx_chain.head)
+               if (card->tx_chain.tail != card->tx_chain.head)
                        spider_net_kick_tx_dma(card);
-               show_error = 0; */
+               */
+               show_error = 0;
                break;
 
        /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
@@ -1354,7 +1393,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
                if (netif_msg_intr(card))
                        pr_err("got descriptor chain end interrupt, "
                               "restarting DMAC %c.\n",
-                              'D'+i-SPIDER_NET_GDDDCEINT);
+                              'D'-(i-SPIDER_NET_GDDDCEINT)/3);
                spider_net_refill_rx_chain(card);
                spider_net_enable_rxdmac(card);
                show_error = 0;
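
The corrected letter arithmetic assumes the chain-end interrupt
numbers for DMACs D down to A are spaced three apart starting at
SPIDER_NET_GDDDCEINT, which is what the division by 3 implies:

        /* i == GDDDCEINT      ->  'D' - 0  ==  'D'
         * i == GDDDCEINT + 3  ->  'D' - 1  ==  'C'
         * i == GDDDCEINT + 6  ->  'B', and so on down to 'A'
         */
        char dmac = 'D' - (i - SPIDER_NET_GDDDCEINT) / 3;

The old '+' form counted upward from 'D' by one per status bit, which
named nonexistent DMACs.
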
@@ -1423,8 +1462,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
        }
 
        if ((show_error) && (netif_msg_intr(card)))
-               pr_err("Got error interrupt, GHIINT0STS = 0x%08x, "
+               pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
                       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
+                      card->netdev->name,
                       status_reg, error_reg1, error_reg2);
 
        /* clear interrupt sources */
@@ -1460,6 +1500,8 @@ spider_net_interrupt(int irq, void *ptr)
                spider_net_rx_irq_off(card);
                netif_rx_schedule(netdev);
        }
+       if (status_reg & SPIDER_NET_TXINT)
+               netif_rx_schedule(netdev);
 
        if (status_reg & SPIDER_NET_ERRINT )
                spider_net_handle_error_irq(card, status_reg);
@@ -1599,7 +1641,7 @@ spider_net_enable_card(struct spider_net_card *card)
                             SPIDER_NET_INT2_MASK_VALUE);
 
        spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
-                            SPIDER_NET_GDTDCEIDIS);
+                            SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS);
 }
 
 /**
@@ -1615,17 +1657,26 @@ int
 spider_net_open(struct net_device *netdev)
 {
        struct spider_net_card *card = netdev_priv(netdev);
-       int result;
+       struct spider_net_descr *descr;
+       int i, result;
 
        result = -ENOMEM;
        if (spider_net_init_chain(card, &card->tx_chain, card->descr,
-                       PCI_DMA_TODEVICE, card->tx_desc))
+                                 card->num_tx_desc))
                goto alloc_tx_failed;
+
+       card->low_watermark = NULL;
+
+       /* rx_chain follows tx_chain, so its offset is descr + num_tx_desc */
        if (spider_net_init_chain(card, &card->rx_chain,
-                       card->descr + card->rx_desc,
-                       PCI_DMA_FROMDEVICE, card->rx_desc))
+                                 card->descr + card->num_tx_desc,
+                                 card->num_rx_desc))
                goto alloc_rx_failed;
 
+       descr = card->rx_chain.head;
+       for (i=0; i < card->num_rx_desc; i++, descr++)
+               descr->next_descr_addr = descr->next->bus_addr;
+
        /* allocate rx skbs */
        if (spider_net_alloc_rx_skbs(card))
                goto alloc_skbs_failed;
@@ -1878,10 +1929,7 @@ spider_net_stop(struct net_device *netdev)
        spider_net_disable_rxdmac(card);
 
        /* release chains */
-       if (spin_trylock(&card->tx_chain.lock)) {
-               spider_net_release_tx_chain(card, 1);
-               spin_unlock(&card->tx_chain.lock);
-       }
+       spider_net_release_tx_chain(card, 1);
 
        spider_net_free_chain(card, &card->tx_chain);
        spider_net_free_chain(card, &card->rx_chain);
@@ -2012,8 +2060,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
 
        card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
 
-       card->tx_desc = tx_descriptors;
-       card->rx_desc = rx_descriptors;
+       card->num_tx_desc = tx_descriptors;
+       card->num_rx_desc = rx_descriptors;
 
        spider_net_setup_netdev_ops(netdev);
 
@@ -2252,6 +2300,8 @@ static struct pci_driver spider_net_driver = {
  */
 static int __init spider_net_init(void)
 {
+       printk(KERN_INFO "Spidernet version %s.\n", VERSION);
+
        if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
                rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
                pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
index a59deda2f95e1b9e479850a6b4f3a23fe81314f8..b3b46119b4243f141823efe7358d4d01c8bdb5a0 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef _SPIDER_NET_H
 #define _SPIDER_NET_H
 
+#define VERSION "1.1 A"
+
 #include "sungem_phy.h"
 
 extern int spider_net_stop(struct net_device *netdev);
@@ -47,7 +49,7 @@ extern char spider_net_driver_name[];
 #define SPIDER_NET_TX_DESCRIPTORS_MIN          16
 #define SPIDER_NET_TX_DESCRIPTORS_MAX          512
 
-#define SPIDER_NET_TX_TIMER                    20
+#define SPIDER_NET_TX_TIMER                    (HZ/5)
 
 #define SPIDER_NET_RX_CSUM_DEFAULT             1
 
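Expressing the timeout in terms of HZ keeps the interval constant
across kernels built with different tick rates. Assuming the timer is
armed in the usual way, e.g.

        mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);

a literal 20 meant 20 ms on an HZ=1000 build but 200 ms on HZ=100,
while (HZ/5) is 200 ms in both. (card->tx_timer is an assumed field
name; the line only illustrates the jiffies arithmetic.)
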
@@ -189,7 +191,9 @@ extern char spider_net_driver_name[];
 #define SPIDER_NET_MACMODE_VALUE       0x00000001
 #define SPIDER_NET_BURSTLMT_VALUE      0x00000200 /* about 16 us */
 
-/* 1(0)                                        enable r/tx dma
+/* DMAC control register GDMACCNTR
+ *
+ * 1(0)                                enable r/tx dma
  *  0000000                            fixed to 0
  *
  *         000000                      fixed to 0
@@ -198,6 +202,7 @@ extern char spider_net_driver_name[];
  *
  *                 000000              fixed to 0
  *                       00            burst alignment: 128 bytes
+ *                       11            burst alignment: 1024 bytes
  *
  *                         00000       fixed to 0
  *                              0      descr writeback size 32 bytes
@@ -208,10 +213,13 @@ extern char spider_net_driver_name[];
 #define SPIDER_NET_DMA_RX_VALUE                0x80000000
 #define SPIDER_NET_DMA_RX_FEND_VALUE   0x00030003
 /* to set TX_DMA_EN */
-#define SPIDER_NET_TX_DMA_EN           0x80000000
-#define SPIDER_NET_GDTDCEIDIS          0x00000002
-#define SPIDER_NET_DMA_TX_VALUE                SPIDER_NET_TX_DMA_EN | \
-                                       SPIDER_NET_GDTDCEIDIS
+#define SPIDER_NET_TX_DMA_EN           0x80000000
+#define SPIDER_NET_GDTBSTA             0x00000300
+#define SPIDER_NET_GDTDCEIDIS          0x00000002
+#define SPIDER_NET_DMA_TX_VALUE        SPIDER_NET_TX_DMA_EN | \
+                                       SPIDER_NET_GDTBSTA | \
+                                       SPIDER_NET_GDTDCEIDIS
+
 #define SPIDER_NET_DMA_TX_FEND_VALUE   0x00030003
 
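One caveat worth noting: SPIDER_NET_DMA_TX_VALUE expands to an
unparenthesized OR expression. That is harmless in plain register
writes such as

        spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
                             SPIDER_NET_DMA_TX_VALUE);

but a fully parenthesized form would also be safe next to other
operators (for example SPIDER_NET_DMA_TX_VALUE & mask):

        #define SPIDER_NET_DMA_TX_VALUE (SPIDER_NET_TX_DMA_EN | \
                                         SPIDER_NET_GDTBSTA | \
                                         SPIDER_NET_GDTDCEIDIS)
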
 /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@@ -320,13 +328,10 @@ enum spider_net_int2_status {
        SPIDER_NET_GRISPDNGINT
 };
 
-#define SPIDER_NET_TXINT       ( (1 << SPIDER_NET_GTTEDINT) | \
-                                 (1 << SPIDER_NET_GDTDCEINT) | \
-                                 (1 << SPIDER_NET_GDTFDCINT) )
+#define SPIDER_NET_TXINT       ( (1 << SPIDER_NET_GDTFDCINT) )
 
-/* we rely on flagged descriptor interrupts*/
-#define SPIDER_NET_RXINT       ( (1 << SPIDER_NET_GDAFDCINT) | \
-                                 (1 << SPIDER_NET_GRMFLLINT) )
+/* We rely on flagged descriptor interrupts */
+#define SPIDER_NET_RXINT       ( (1 << SPIDER_NET_GDAFDCINT) )
 
 #define SPIDER_NET_ERRINT      ( 0xffffffff & \
                                  (~SPIDER_NET_TXINT) & \
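
Since the error mask is derived by exclusion, narrowing TXINT and
RXINT automatically routes every other status bit to the error path.
The resulting dispatch in the interrupt handler looks like this
sketch (spider_net_read_reg is an assumed accessor name):

        u32 status = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);

        if (status & SPIDER_NET_RXINT)    /* flagged RX descriptor done */
                netif_rx_schedule(netdev);
        if (status & SPIDER_NET_TXINT)    /* TX reclaim also via NAPI   */
                netif_rx_schedule(netdev);
        if (status & SPIDER_NET_ERRINT)   /* everything that is neither */
                spider_net_handle_error_irq(card, status);
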
@@ -349,6 +354,7 @@ enum spider_net_int2_status {
 #define SPIDER_NET_DESCR_FORCE_END             0x50000000 /* used in rx and tx */
 #define SPIDER_NET_DESCR_CARDOWNED             0xA0000000 /* used in rx and tx */
 #define SPIDER_NET_DESCR_NOT_IN_USE            0xF0000000
+#define SPIDER_NET_DESCR_TXDESFLG              0x00800000
 
 struct spider_net_descr {
        /* as defined by the hardware */
@@ -433,6 +439,7 @@ struct spider_net_card {
 
        struct spider_net_descr_chain tx_chain;
        struct spider_net_descr_chain rx_chain;
+       struct spider_net_descr *low_watermark;
 
        struct net_device_stats netdev_stats;
 
@@ -448,8 +455,8 @@ struct spider_net_card {
 
        /* for ethtool */
        int msg_enable;
-       int rx_desc;
-       int tx_desc;
+       int num_rx_desc;
+       int num_tx_desc;
        struct spider_net_extra_stats spider_stats;
 
        struct spider_net_descr descr[0];
index 589e43658dee35e3ccc0feec526ededf5406d6ef..91b99510291516d172701c2b7e9e9cff6f4f30ce 100644 (file)
@@ -76,7 +76,7 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
        /* clear and fill out info */
        memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
        strncpy(drvinfo->driver, spider_net_driver_name, 32);
-       strncpy(drvinfo->version, "0.1", 32);
+       strncpy(drvinfo->version, VERSION, 32);
        strcpy(drvinfo->fw_version, "no information");
        strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
 }
@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
        struct spider_net_card *card = netdev->priv;
 
        ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
-       ering->tx_pending = card->tx_desc;
+       ering->tx_pending = card->num_tx_desc;
        ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
-       ering->rx_pending = card->rx_desc;
+       ering->rx_pending = card->num_rx_desc;
 }
 
 static int spider_net_get_stats_count(struct net_device *netdev)
index d1d1885b0295d2ae82f3e733ec65569cd52c9747..a3220a96524f0ead88f6a6b0089ec569007627ea 100644 (file)
@@ -330,7 +330,7 @@ out2:
 out1:
        free_netdev(dev);
 out:
-       iounmap((void *)ioaddr);
+       iounmap((void __iomem *)ioaddr);
        return ERR_PTR(err);
 }
 
index 91c76544e4dd462dde12acb1fdf4d15c80a2a3dc..b865db363ba0bf62fde272a2063fe7135dee788c 100644 (file)
@@ -286,7 +286,7 @@ struct net_device * __init sun3lance_probe(int unit)
 
 out1:
 #ifdef CONFIG_SUN3
-       iounmap((void *)dev->base_addr);
+       iounmap((void __iomem *)dev->base_addr);
 #endif
 out:
        free_netdev(dev);
@@ -326,7 +326,7 @@ static int __init lance_probe( struct net_device *dev)
                ioaddr_probe[1] = tmp2;
 
 #ifdef CONFIG_SUN3
-               iounmap((void *)ioaddr);
+               iounmap((void __iomem *)ioaddr);
 #endif
                return 0;
        }
@@ -956,7 +956,7 @@ void cleanup_module(void)
 {
        unregister_netdev(sun3lance_dev);
 #ifdef CONFIG_SUN3
-       iounmap((void *)sun3lance_dev->base_addr);
+       iounmap((void __iomem *)sun3lance_dev->base_addr);
 #endif
        free_netdev(sun3lance_dev);
 }
index 2cfd9634895a49eefdad26dbbd591ce20e65eaae..f6b3a94e97bfe52727fa39c55772a8eac7d73bfc 100644 (file)
@@ -1730,7 +1730,7 @@ static void __init de21040_get_media_info(struct de_private *de)
 }
 
 /* Note: this routine returns extra data bits for size detection. */
-static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
+static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
 {
        int i;
        unsigned retval = 0;
@@ -1926,7 +1926,7 @@ bad_srom:
        goto fill_defaults;
 }
 
-static int __init de_init_one (struct pci_dev *pdev,
+static int __devinit de_init_one (struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
 {
        struct net_device *dev;
@@ -2082,7 +2082,7 @@ err_out_free:
        return rc;
 }
 
-static void __exit de_remove_one (struct pci_dev *pdev)
+static void __devexit de_remove_one (struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct de_private *de = dev->priv;
@@ -2164,7 +2164,7 @@ static struct pci_driver de_driver = {
        .name           = DRV_NAME,
        .id_table       = de_pci_tbl,
        .probe          = de_init_one,
-       .remove         = __exit_p(de_remove_one),
+       .remove         = __devexit_p(de_remove_one),
 #ifdef CONFIG_PM
        .suspend        = de_suspend,
        .resume         = de_resume,
index 30294127a0aa90484b907c235d4424ee773336b0..ecc50db8585ab287a9e25ca316f039a8546eef1e 100644 (file)
@@ -55,7 +55,7 @@ config PCI_DEBUG
 config HT_IRQ
        bool "Interrupts on hypertransport devices"
        default y
-       depends on X86_LOCAL_APIC && X86_IO_APIC
+       depends on PCI && X86_LOCAL_APIC && X86_IO_APIC
        help
           This allows native hypertransport devices to use interrupts.
 
index 0b20dfacbf595c6fcc1c00855771688582f3780c..d9417072807506771280912c887d5c8c9da27877 100644 (file)
@@ -136,7 +136,7 @@ static int max6902_get_datetime(struct device *dev, struct rtc_time *dt)
        dt->tm_min      = BCD2BIN(chip->buf[2]);
        dt->tm_hour     = BCD2BIN(chip->buf[3]);
        dt->tm_mday     = BCD2BIN(chip->buf[4]);
-       dt->tm_mon      = BCD2BIN(chip->buf[5] - 1);
+       dt->tm_mon      = BCD2BIN(chip->buf[5]) - 1;
        dt->tm_wday     = BCD2BIN(chip->buf[6]);
        dt->tm_year = BCD2BIN(chip->buf[7]);
 
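The parenthesis move above is the entire fix: BCD2BIN() decodes a
binary-coded-decimal byte, so the 0-based month adjustment has to
happen after decoding. A worked example:

        /* October is stored by the chip as BCD 0x10 */
        BCD2BIN(0x10) - 1    /* == 10 - 1 == 9, the correct tm_mon    */
        BCD2BIN(0x10 - 1)    /* == BCD2BIN(0x0f) == 15, a bogus month */
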
index 8b6efcc05058ff5e66706735ccccc19d406146f2..143302a8e79c5ed283a86b960850aa753014592e 100644 (file)
@@ -160,7 +160,7 @@ static int sh_rtc_open(struct device *dev)
        tmp |= RCR1_CIE;
        writeb(tmp, rtc->regbase + RCR1);
 
-       ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, SA_INTERRUPT,
+       ret = request_irq(rtc->periodic_irq, sh_rtc_periodic, IRQF_DISABLED,
                          "sh-rtc period", dev);
        if (unlikely(ret)) {
                dev_err(dev, "request period IRQ failed with %d, IRQ %d\n",
@@ -168,7 +168,7 @@ static int sh_rtc_open(struct device *dev)
                return ret;
        }
 
-       ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, SA_INTERRUPT,
+       ret = request_irq(rtc->carry_irq, sh_rtc_interrupt, IRQF_DISABLED,
                          "sh-rtc carry", dev);
        if (unlikely(ret)) {
                dev_err(dev, "request carry IRQ failed with %d, IRQ %d\n",
@@ -177,7 +177,7 @@ static int sh_rtc_open(struct device *dev)
                goto err_bad_carry;
        }
 
-       ret = request_irq(rtc->alarm_irq, sh_rtc_interrupt, SA_INTERRUPT,
+       ret = request_irq(rtc->alarm_irq, sh_rtc_interrupt, IRQF_DISABLED,
                          "sh-rtc alarm", dev);
        if (unlikely(ret)) {
                dev_err(dev, "request alarm IRQ failed with %d, IRQ %d\n",
index 09b714f1cdc39f9dedb4cad38b5b036677a0d9dd..3b58d3d5d38a703ebc658564199d3644e016d0d3 100644 (file)
@@ -195,9 +195,9 @@ static int rtc_probe(struct platform_device *pdev)
         * are all disabled */
        v3020_set_reg(chip, V3020_STATUS_0, 0x0);
 
-       dev_info(&pdev->dev, "Chip available at physical address 0x%p,"
+       dev_info(&pdev->dev, "Chip available at physical address 0x%llx, "
                "data connected to D%d\n",
-               (void*)pdev->resource[0].start,
+               (unsigned long long)pdev->resource[0].start,
                chip->leftshift);
 
        platform_set_drvdata(pdev, chip);
index 4362ff2602446f67c3209eeb92e267f7bd5b62c4..abd02ed501cbe52cab031dadebf8087bf8290cf4 100644 (file)
@@ -110,7 +110,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
                monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
                if (!monbuf)
                        return -ENOMEM;
-               monbuf->data = kzalloc(monbuf->hdr.datalen,
+               monbuf->data = kzalloc(monhdr->datalen,
                                       GFP_KERNEL | GFP_DMA);
                if (!monbuf->data) {
                        kfree(monbuf);
index 07c7f19339d2228db33f77e6a2adfe89015433e0..2d78f0f4a40fce98c589403106525fbf7a999058 100644 (file)
@@ -370,7 +370,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
        struct res_acc_data *res_data;
        struct subchannel *sch;
 
-       res_data = (struct res_acc_data *)data;
+       res_data = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if a subchannel is newly available. */
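
This and several later hunks drop casts on void pointers. In C a
void * converts implicitly to any object pointer type, so the casts
added nothing; plain assignment is the usual kernel style:

        /* before */ res_data = (struct res_acc_data *) data;
        /* after  */ res_data = data;   /* void * converts implicitly */
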
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data)
                u32 isinfo[28];
        } *lir;
 
-       lir = (struct lir*) data;
+       lir = data;
        if (!(lir->iq&0x80))
                /* NULL link incident record */
                return -EINVAL;
@@ -628,7 +628,7 @@ __chp_add(struct subchannel_id schid, void *data)
        struct channel_path *chp;
        struct subchannel *sch;
 
-       chp = (struct channel_path *)data;
+       chp = data;
        sch = get_subchannel_by_schid(schid);
        if (!sch)
                /* Check if the subchannel is now available. */
@@ -707,8 +707,7 @@ chp_process_crw(int chpid, int on)
        return chp_add(chpid);
 }
 
-static inline int
-__check_for_io_and_kill(struct subchannel *sch, int index)
+static inline int check_for_io_on_path(struct subchannel *sch, int index)
 {
        int cc;
 
@@ -718,10 +717,8 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
        cc = stsch(sch->schid, &sch->schib);
        if (cc)
                return 0;
-       if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
-               device_set_waiting(sch);
+       if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
                return 1;
-       }
        return 0;
 }
 
@@ -750,12 +747,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
                } else {
                        sch->opm &= ~(0x80 >> chp);
                        sch->lpm &= ~(0x80 >> chp);
-                       /*
-                        * Give running I/O a grace period in which it
-                        * can successfully terminate, even using the
-                        * just varied off path. Then kill it.
-                        */
-                       if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
+                       if (check_for_io_on_path(sch, chp))
+                               /* Path verification is done after killing. */
+                               device_kill_io(sch);
+                       else if (!sch->lpm) {
                                if (css_enqueue_subchannel_slow(sch->schid)) {
                                        css_clear_subchannel_slow_list();
                                        need_rescan = 1;
index f18b1623cad77a67795226927859bd86859eb347..8936e460a807cae8995eba957d224e9b9468c434 100644 (file)
@@ -609,8 +609,8 @@ do_IRQ (struct pt_regs *regs)
        struct irb *irb;
        struct pt_regs *old_regs;
 
-       irq_enter ();
        old_regs = set_irq_regs(regs);
+       irq_enter();
        asm volatile ("mc 0,0");
        if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
                /**
@@ -655,8 +655,8 @@ do_IRQ (struct pt_regs *regs)
                 * out of the sie which costs more cycles than it saves.
                 */
        } while (!MACHINE_IS_VM && tpi (NULL) != 0);
+       irq_exit();
        set_irq_regs(old_regs);
-       irq_exit ();
 }
 
 #ifdef CONFIG_CCW_CONSOLE
index 7086a74e9871df9f42ba4ff74a217761fcb83967..a2dee5bf5a17aa03b878bd91cffff090f2e27450 100644 (file)
@@ -177,7 +177,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
        struct device *dev;
 
        dev = bus_find_device(&css_bus_type, NULL,
-                             (void *)&schid, check_subchannel);
+                             &schid, check_subchannel);
 
        return dev ? to_subchannel(dev) : NULL;
 }
index 8aabb4adeb5f08eb79ad7642005985f257e78815..4c2ff83362887aafe64fea2a5ebf6137b8456b9a 100644 (file)
@@ -76,9 +76,8 @@ struct ccw_device_private {
        int state;              /* device state */
        atomic_t onoff;
        unsigned long registered;
-       __u16 devno;            /* device number */
-       __u16 sch_no;           /* subchannel number */
-       __u8 ssid;              /* subchannel set id */
+       struct ccw_dev_id dev_id;       /* device id */
+       struct subchannel_id schid;     /* subchannel number */
        __u8 imask;             /* lpm mask for SNID/SID/SPGID */
        int iretry;             /* retry counter SNID/SID/SPGID */
        struct {
@@ -171,7 +170,7 @@ void device_trigger_reprobe(struct subchannel *);
 
 /* Helper functions for vary on/off. */
 int device_is_online(struct subchannel *);
-void device_set_waiting(struct subchannel *);
+void device_kill_io(struct subchannel *);
 
 /* Machine check helper function. */
 void device_kill_pending_timer(struct subchannel *);
index 688945662c151e16abbc65074dba3b6ed0ac06c8..94bdd4d8a4c9c205b72ef1b92dc598fb37b38d0e 100644 (file)
@@ -552,21 +552,19 @@ ccw_device_register(struct ccw_device *cdev)
 }
 
 struct match_data {
-       unsigned int devno;
-       unsigned int ssid;
+       struct ccw_dev_id dev_id;
        struct ccw_device * sibling;
 };
 
 static int
 match_devno(struct device * dev, void * data)
 {
-       struct match_data * d = (struct match_data *)data;
+       struct match_data * d = data;
        struct ccw_device * cdev;
 
        cdev = to_ccwdev(dev);
        if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
-           (cdev->private->devno == d->devno) &&
-           (cdev->private->ssid == d->ssid) &&
+           ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
            (cdev != d->sibling)) {
                cdev->private->state = DEV_STATE_NOT_OPER;
                return 1;
@@ -574,15 +572,13 @@ match_devno(struct device * dev, void * data)
        return 0;
 }
 
-static struct ccw_device *
-get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
-                        struct ccw_device *sibling)
+static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
+                                                    struct ccw_device *sibling)
 {
        struct device *dev;
        struct match_data data;
 
-       data.devno = devno;
-       data.ssid = ssid;
+       data.dev_id = *dev_id;
        data.sibling = sibling;
        dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
 
@@ -595,7 +591,7 @@ ccw_device_add_changed(void *data)
 
        struct ccw_device *cdev;
 
-       cdev = (struct ccw_device *)data;
+       cdev = data;
        if (device_add(&cdev->dev)) {
                put_device(&cdev->dev);
                return;
@@ -616,9 +612,9 @@ ccw_device_do_unreg_rereg(void *data)
        struct subchannel *sch;
        int need_rename;
 
-       cdev = (struct ccw_device *)data;
+       cdev = data;
        sch = to_subchannel(cdev->dev.parent);
-       if (cdev->private->devno != sch->schib.pmcw.dev) {
+       if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
                /*
                 * The device number has changed. This is usually only when
                 * a device has been detached under VM and then re-appeared
@@ -633,10 +629,12 @@ ccw_device_do_unreg_rereg(void *data)
                 *        get possibly sick...
                 */
                struct ccw_device *other_cdev;
+               struct ccw_dev_id dev_id;
 
                need_rename = 1;
-               other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
-                                                     sch->schid.ssid, cdev);
+               dev_id.devno = sch->schib.pmcw.dev;
+               dev_id.ssid = sch->schid.ssid;
+               other_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
                if (other_cdev) {
                        struct subchannel *other_sch;
 
@@ -652,7 +650,7 @@ ccw_device_do_unreg_rereg(void *data)
                }
                /* Update ssd info here. */
                css_get_ssd_info(sch);
-               cdev->private->devno = sch->schib.pmcw.dev;
+               cdev->private->dev_id.devno = sch->schib.pmcw.dev;
        } else
                need_rename = 0;
        device_remove_files(&cdev->dev);
@@ -662,7 +660,7 @@ ccw_device_do_unreg_rereg(void *data)
                snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
                          sch->schid.ssid, sch->schib.pmcw.dev);
        PREPARE_WORK(&cdev->private->kick_work,
-                    ccw_device_add_changed, (void *)cdev);
+                    ccw_device_add_changed, cdev);
        queue_work(ccw_device_work, &cdev->private->kick_work);
 }
 
@@ -687,7 +685,7 @@ io_subchannel_register(void *data)
        int ret;
        unsigned long flags;
 
-       cdev = (struct ccw_device *) data;
+       cdev = data;
        sch = to_subchannel(cdev->dev.parent);
 
        if (klist_node_attached(&cdev->dev.knode_parent)) {
@@ -759,7 +757,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
                        break;
                sch = to_subchannel(cdev->dev.parent);
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_call_sch_unregister, (void *) cdev);
+                            ccw_device_call_sch_unregister, cdev);
                queue_work(slow_path_wq, &cdev->private->kick_work);
                if (atomic_dec_and_test(&ccw_device_init_count))
                        wake_up(&ccw_device_init_wq);
@@ -774,7 +772,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
                if (!get_device(&cdev->dev))
                        break;
                PREPARE_WORK(&cdev->private->kick_work,
-                            io_subchannel_register, (void *) cdev);
+                            io_subchannel_register, cdev);
                queue_work(slow_path_wq, &cdev->private->kick_work);
                break;
        }
@@ -792,9 +790,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 
        /* Init private data. */
        priv = cdev->private;
-       priv->devno = sch->schib.pmcw.dev;
-       priv->ssid = sch->schid.ssid;
-       priv->sch_no = sch->schid.sch_no;
+       priv->dev_id.devno = sch->schib.pmcw.dev;
+       priv->dev_id.ssid = sch->schid.ssid;
+       priv->schid = sch->schid;
        priv->state = DEV_STATE_NOT_OPER;
        INIT_LIST_HEAD(&priv->cmb_list);
        init_waitqueue_head(&priv->wait_q);
@@ -912,7 +910,7 @@ io_subchannel_remove (struct subchannel *sch)
         */
        if (get_device(&cdev->dev)) {
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_unregister, (void *) cdev);
+                            ccw_device_unregister, cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
        }
        return 0;
@@ -1055,7 +1053,7 @@ __ccwdev_check_busid(struct device *dev, void *id)
 {
        char *bus_id;
 
-       bus_id = (char *)id;
+       bus_id = id;
 
        return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
 }
index 00be9a5b4acde18141a76d7aef38ad4b90b2347c..c6140cc97a80933bc129ca3b091183fdcd3ec747 100644 (file)
@@ -21,7 +21,6 @@ enum dev_state {
        /* states to wait for i/o completion before doing something */
        DEV_STATE_CLEAR_VERIFY,
        DEV_STATE_TIMEOUT_KILL,
-       DEV_STATE_WAIT4IO,
        DEV_STATE_QUIESCE,
        /* special states for devices gone not operational */
        DEV_STATE_DISCONNECTED,
index b67620208f36e2f463b4022a2a25e897197f3067..fcaf28d7b4eb6486f2addf5b462d8551bffb2072 100644 (file)
@@ -59,18 +59,6 @@ device_set_disconnected(struct subchannel *sch)
        cdev->private->state = DEV_STATE_DISCONNECTED;
 }
 
-void
-device_set_waiting(struct subchannel *sch)
-{
-       struct ccw_device *cdev;
-
-       if (!sch->dev.driver_data)
-               return;
-       cdev = sch->dev.driver_data;
-       ccw_device_set_timeout(cdev, 10*HZ);
-       cdev->private->state = DEV_STATE_WAIT4IO;
-}
-
 /*
  * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
  */
@@ -183,9 +171,9 @@ ccw_device_handle_oper(struct ccw_device *cdev)
            cdev->id.cu_model != cdev->private->senseid.cu_model ||
            cdev->id.dev_type != cdev->private->senseid.dev_type ||
            cdev->id.dev_model != cdev->private->senseid.dev_model ||
-           cdev->private->devno != sch->schib.pmcw.dev) {
+           cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_do_unreg_rereg, (void *)cdev);
+                            ccw_device_do_unreg_rereg, cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
                return 0;
        }
@@ -255,7 +243,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
        case DEV_STATE_NOT_OPER:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : unknown device %04x on subchannel "
-                         "0.%x.%04x\n", cdev->private->devno,
+                         "0.%x.%04x\n", cdev->private->dev_id.devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        case DEV_STATE_OFFLINE:
@@ -282,14 +270,15 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
                CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
                          "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
                          "%04X/%02X\n",
-                         cdev->private->ssid, cdev->private->devno,
+                         cdev->private->dev_id.ssid,
+                         cdev->private->dev_id.devno,
                          cdev->id.cu_type, cdev->id.cu_model,
                          cdev->id.dev_type, cdev->id.dev_model);
                break;
        case DEV_STATE_BOXED:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : boxed device %04x on subchannel "
-                         "0.%x.%04x\n", cdev->private->devno,
+                         "0.%x.%04x\n", cdev->private->dev_id.devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        }
@@ -325,13 +314,13 @@ ccw_device_oper_notify(void *data)
        struct subchannel *sch;
        int ret;
 
-       cdev = (struct ccw_device *)data;
+       cdev = data;
        sch = to_subchannel(cdev->dev.parent);
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_OPER) : 0;
        if (!ret)
                /* Driver doesn't want device back. */
-               ccw_device_do_unreg_rereg((void *)cdev);
+               ccw_device_do_unreg_rereg(cdev);
        else {
                /* Reenable channel measurements, if needed. */
                cmf_reenable(cdev);
@@ -363,12 +352,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
        if (state == DEV_STATE_BOXED)
                CIO_DEBUG(KERN_WARNING, 2,
                          "Boxed device %04x on subchannel %04x\n",
-                         cdev->private->devno, sch->schid.sch_no);
+                         cdev->private->dev_id.devno, sch->schid.sch_no);
 
        if (cdev->private->flags.donotify) {
                cdev->private->flags.donotify = 0;
                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
-                            (void *)cdev);
+                            cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);
@@ -412,7 +401,8 @@ static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
                /* PGID mismatch, can't pathgroup. */
                CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
                              "0.%x.%04x, can't pathgroup\n",
-                             cdev->private->ssid, cdev->private->devno);
+                             cdev->private->dev_id.ssid,
+                             cdev->private->dev_id.devno);
                cdev->private->options.pgroup = 0;
                return;
        }
@@ -523,7 +513,7 @@ ccw_device_nopath_notify(void *data)
        struct subchannel *sch;
        int ret;
 
-       cdev = (struct ccw_device *)data;
+       cdev = data;
        sch = to_subchannel(cdev->dev.parent);
        /* Extra sanity. */
        if (sch->lpm)
@@ -537,7 +527,7 @@ ccw_device_nopath_notify(void *data)
                        if (get_device(&cdev->dev)) {
                                PREPARE_WORK(&cdev->private->kick_work,
                                             ccw_device_call_sch_unregister,
-                                            (void *)cdev);
+                                            cdev);
                                queue_work(ccw_device_work,
                                           &cdev->private->kick_work);
                        } else
@@ -592,7 +582,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
                break;
        default:
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_nopath_notify, (void *)cdev);
+                            ccw_device_nopath_notify, cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
@@ -723,7 +713,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
        sch = to_subchannel(cdev->dev.parent);
        if (get_device(&cdev->dev)) {
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_call_sch_unregister, (void *)cdev);
+                            ccw_device_call_sch_unregister, cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);
@@ -754,7 +744,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
        }
        if (get_device(&cdev->dev)) {
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_call_sch_unregister, (void *)cdev);
+                            ccw_device_call_sch_unregister, cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);
@@ -859,7 +849,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
-                                    ccw_device_nopath_notify, (void *)cdev);
+                                    ccw_device_nopath_notify, cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
@@ -885,7 +875,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                else {
-                       printk("Huh? %s(%s): unsolicited interrupt...\n",
+                       printk(KERN_INFO "Huh? %s(%s): unsolicited "
+                              "interrupt...\n",
                               __FUNCTION__, cdev->dev.bus_id);
                        if (cdev->handler)
                                cdev->handler (cdev, 0, irb);
@@ -944,10 +935,10 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
-                             ERR_PTR(-ETIMEDOUT));
+                             ERR_PTR(-EIO));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_nopath_notify, (void *)cdev);
+                            ccw_device_nopath_notify, cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
@@ -970,7 +961,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
-                                    ccw_device_nopath_notify, (void *)cdev);
+                                    ccw_device_nopath_notify, cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
@@ -981,51 +972,15 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
-                             ERR_PTR(-ETIMEDOUT));
-}
-
-static void
-ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
-{
-       struct irb *irb;
-       struct subchannel *sch;
-
-       irb = (struct irb *) __LC_IRB;
-       /*
-        * Accumulate status and find out if a basic sense is needed.
-        * This is fine since we have already adapted the lpm.
-        */
-       ccw_device_accumulate_irb(cdev, irb);
-       if (cdev->private->flags.dosense) {
-               if (ccw_device_do_sense(cdev, irb) == 0) {
-                       cdev->private->state = DEV_STATE_W4SENSE;
-               }
-               return;
-       }
-
-       /* Iff device is idle, reset timeout. */
-       sch = to_subchannel(cdev->dev.parent);
-       if (!stsch(sch->schid, &sch->schib))
-               if (sch->schib.scsw.actl == 0)
-                       ccw_device_set_timeout(cdev, 0);
-       /* Call the handler. */
-       ccw_device_call_handler(cdev);
-       if (!sch->lpm) {
-               PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_nopath_notify, (void *)cdev);
-               queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-       } else if (cdev->private->flags.doverify)
-               ccw_device_online_verify(cdev, 0);
+                             ERR_PTR(-EIO));
 }
 
-static void
-ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+void device_kill_io(struct subchannel *sch)
 {
        int ret;
-       struct subchannel *sch;
+       struct ccw_device *cdev;
 
-       sch = to_subchannel(cdev->dev.parent);
-       ccw_device_set_timeout(cdev, 0);
+       cdev = sch->dev.driver_data;
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
@@ -1035,7 +990,7 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
        if (ret == -ENODEV) {
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
-                                    ccw_device_nopath_notify, (void *)cdev);
+                                    ccw_device_nopath_notify, cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
@@ -1044,12 +999,12 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
        }
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
-                             ERR_PTR(-ETIMEDOUT));
+                             ERR_PTR(-EIO));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
-                            ccw_device_nopath_notify, (void *)cdev);
+                            ccw_device_nopath_notify, cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
-       } else if (cdev->private->flags.doverify)
+       } else
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
 }
@@ -1286,12 +1241,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
                [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
        },
-       [DEV_STATE_WAIT4IO] = {
-               [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
-               [DEV_EVENT_INTERRUPT]   = ccw_device_wait4io_irq,
-               [DEV_EVENT_TIMEOUT]     = ccw_device_wait4io_timeout,
-               [DEV_EVENT_VERIFY]      = ccw_device_delay_verify,
-       },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
index 1398367b5f68ef14f3591f9daf53e582475557b9..a74785b9e4ebe9303c4864d857a682f7f79f0259 100644 (file)
@@ -251,7 +251,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
                 */
                CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
                              "0.%x.%04x reports cmd reject\n",
-                             cdev->private->devno, sch->schid.ssid,
+                             cdev->private->dev_id.devno, sch->schid.ssid,
                              sch->schid.sch_no);
                return -EOPNOTSUPP;
        }
@@ -259,7 +259,8 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
                CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
                              "lpum %02X, cnt %02d, sns :"
                              " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-                             cdev->private->ssid, cdev->private->devno,
+                             cdev->private->dev_id.ssid,
+                             cdev->private->dev_id.devno,
                              irb->esw.esw0.sublog.lpum,
                              irb->esw.esw0.erw.scnt,
                              irb->ecw[0], irb->ecw[1],
@@ -274,14 +275,15 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
                        CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
                                      "on subchannel 0.%x.%04x is "
                                      "'not operational'\n", sch->orb.lpm,
-                                     cdev->private->devno, sch->schid.ssid,
-                                     sch->schid.sch_no);
+                                     cdev->private->dev_id.devno,
+                                     sch->schid.ssid, sch->schid.sch_no);
                return -EACCES;
        }
        /* Hmm, whatever happened, try again. */
        CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
                      "subchannel 0.%x.%04x returns status %02X%02X\n",
-                     cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
+                     cdev->private->dev_id.devno, sch->schid.ssid,
+                     sch->schid.sch_no,
                      irb->scsw.dstat, irb->scsw.cstat);
        return -EAGAIN;
 }
@@ -330,7 +332,7 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
                /* fall through. */
        default:                /* Sense ID failed. Try asking VM. */
                if (MACHINE_IS_VM) {
-                       VM_virtual_device_info (cdev->private->devno,
+                       VM_virtual_device_info (cdev->private->dev_id.devno,
                                                &cdev->private->senseid);
                        if (cdev->private->senseid.cu_type != 0xFFFF) {
                                /* Got the device information from VM. */
index 84b9b18eabc25159d752ea1f78a0792263e97bd3..b39c1fa48acd22cac1fd92990da9699193913510 100644 (file)
@@ -50,7 +50,6 @@ ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
-           cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
@@ -155,7 +154,6 @@ ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
-           cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
@@ -592,13 +590,13 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
 int
 _ccw_device_get_subchannel_number(struct ccw_device *cdev)
 {
-       return cdev->private->sch_no;
+       return cdev->private->schid.sch_no;
 }
 
 int
 _ccw_device_get_device_number(struct ccw_device *cdev)
 {
-       return cdev->private->devno;
+       return cdev->private->dev_id.devno;
 }
 
 
index 84917b39de458c66ea57225e2d60a9f590e29257..2975ce888c19ca3c2dc2b7fcf32ba79442323a46 100644 (file)
@@ -79,7 +79,8 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
                        CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
                                      "0.%x.%04x, lpm %02X, became 'not "
                                      "operational'\n",
-                                     cdev->private->devno, sch->schid.ssid,
+                                     cdev->private->dev_id.devno,
+                                     sch->schid.ssid,
                                      sch->schid.sch_no, cdev->private->imask);
 
                }
@@ -135,7 +136,8 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
                CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
                              "lpum %02X, cnt %02d, sns : "
                              "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-                             cdev->private->ssid, cdev->private->devno,
+                             cdev->private->dev_id.ssid,
+                             cdev->private->dev_id.devno,
                              irb->esw.esw0.sublog.lpum,
                              irb->esw.esw0.erw.scnt,
                              irb->ecw[0], irb->ecw[1],
@@ -147,7 +149,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
        if (irb->scsw.cc == 3) {
                CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
                              " lpm %02X, became 'not operational'\n",
-                             cdev->private->devno, sch->schid.ssid,
+                             cdev->private->dev_id.devno, sch->schid.ssid,
                              sch->schid.sch_no, sch->orb.lpm);
                return -EACCES;
        }
@@ -155,7 +157,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
        if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
                CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
                              "is reserved by someone else\n",
-                             cdev->private->devno, sch->schid.ssid,
+                             cdev->private->dev_id.devno, sch->schid.ssid,
                              sch->schid.sch_no);
                return -EUSERS;
        }
@@ -261,7 +263,7 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
        /* PGID command failed on this path. */
        CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
                      "0.%x.%04x, lpm %02X, became 'not operational'\n",
-                     cdev->private->devno, sch->schid.ssid,
+                     cdev->private->dev_id.devno, sch->schid.ssid,
                      sch->schid.sch_no, cdev->private->imask);
        return ret;
 }
@@ -301,7 +303,7 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
        /* nop command failed on this path. */
        CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
                      "0.%x.%04x, lpm %02X, became 'not operational'\n",
-                     cdev->private->devno, sch->schid.ssid,
+                     cdev->private->dev_id.devno, sch->schid.ssid,
                      sch->schid.sch_no, cdev->private->imask);
        return ret;
 }
@@ -328,8 +330,9 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
                CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
                              "cnt %02d, "
                              "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
-                             cdev->private->ssid,
-                             cdev->private->devno, irb->esw.esw0.erw.scnt,
+                             cdev->private->dev_id.ssid,
+                             cdev->private->dev_id.devno,
+                             irb->esw.esw0.erw.scnt,
                              irb->ecw[0], irb->ecw[1],
                              irb->ecw[2], irb->ecw[3],
                              irb->ecw[4], irb->ecw[5],
@@ -339,7 +342,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
        if (irb->scsw.cc == 3) {
                CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
                              " lpm %02X, became 'not operational'\n",
-                             cdev->private->devno, sch->schid.ssid,
+                             cdev->private->dev_id.devno, sch->schid.ssid,
                              sch->schid.sch_no, cdev->private->imask);
                return -EACCES;
        }
@@ -362,7 +365,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
        if (irb->scsw.cc == 3) {
                CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x,"
                              " lpm %02X, became 'not operational'\n",
-                             cdev->private->devno, sch->schid.ssid,
+                             cdev->private->dev_id.devno, sch->schid.ssid,
                              sch->schid.sch_no, cdev->private->imask);
                return -EACCES;
        }
index caf148d5caadb55ec6eab663168ebc8180b908ea..3f7cbce4cd87ee6bad825f8ae130744405274c8a 100644 (file)
@@ -32,19 +32,18 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
                                 SCHN_STAT_CHN_CTRL_CHK |
                                 SCHN_STAT_INTF_CTRL_CHK)))
                return;
-               
        CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
                      "received"
                      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
                      ": %02X sch_stat : %02X\n",
-                     cdev->private->devno, cdev->private->ssid,
-                     cdev->private->sch_no,
+                     cdev->private->dev_id.devno, cdev->private->schid.ssid,
+                     cdev->private->schid.sch_no,
                      irb->scsw.dstat, irb->scsw.cstat);
 
        if (irb->scsw.cc != 3) {
                char dbf_text[15];
 
-               sprintf(dbf_text, "chk%x", cdev->private->sch_no);
+               sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
                CIO_TRACE_EVENT(0, dbf_text);
                CIO_HEX_EVENT(0, irb, sizeof (struct irb));
        }
index cde822d8b5c82041698cf2b96f206f09ac2eec4e..0648ce5bb684219c551ce03b610c9df512a612a5 100644 (file)
@@ -1741,7 +1741,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
        void *ptr;
        int available;
 
-       sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
+       sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
        QDIO_DBF_TEXT0(0,setup,dbf_text);
        for (i=0;i<no_input_qs;i++) {
                q=irq_ptr->input_qs[i];
@@ -2924,7 +2924,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
 
        irq_ptr = cdev->private->qdio_data;
 
-       sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
+       sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
        QDIO_DBF_TEXT0(0,setup,dbf_text);
        QDIO_DBF_TEXT0(0,trace,dbf_text);
 
@@ -2943,7 +2943,7 @@ qdio_initialize(struct qdio_initialize *init_data)
        int rc;
        char dbf_text[15];
 
-       sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
+       sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
        QDIO_DBF_TEXT0(0,setup,dbf_text);
        QDIO_DBF_TEXT0(0,trace,dbf_text);
 
@@ -2964,7 +2964,7 @@ qdio_allocate(struct qdio_initialize *init_data)
        struct qdio_irq *irq_ptr;
        char dbf_text[15];
 
-       sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
+       sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
        QDIO_DBF_TEXT0(0,setup,dbf_text);
        QDIO_DBF_TEXT0(0,trace,dbf_text);
        if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
@@ -3187,7 +3187,7 @@ qdio_establish(struct qdio_initialize *init_data)
                tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
        }
 
-       sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
+       sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
        QDIO_DBF_TEXT0(0,setup,dbf_text);
        QDIO_DBF_TEXT0(0,trace,dbf_text);
 
index d27e4f6d7045a3a5f21e38d6bab32455d83c3d8a..0d3660c28f7d496840598b908ff909ec753c8c7a 100644 (file)
@@ -4,10 +4,8 @@
  * Copyright (C) 2001 David S. Miller (davem@redhat.com)
  */
 
-#include <linux/kernel.h>
 #include <linux/kthread.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
+#include <linux/syscalls.h>
 #include <linux/delay.h>
 #include <asm/oplib.h>
 #include <asm/ebus.h>
index 728a133d0fc5b7c306443280e63d296e047cc5dc..6b6a855f3795d8cea72822c0b3bb8f5786b83581 100644 (file)
  */
 
 #include <linux/module.h>
-#include <linux/sched.h>
+#include <linux/init.h>
 #include <linux/kthread.h>
-#include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/ioport.h>
-#include <linux/init.h>
 #include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
+#include <linux/syscalls.h>
 
 #include <asm/ebus.h>
 #include <asm/uaccess.h>
index a0d1cee0be77540691637566def3b862631e1829..306f46b85a5522aff952da4fe6e0759a86b2a651 100644 (file)
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/blkdev.h>
 #include <asm/system.h>
 #include <linux/errno.h>
index 0d5713dfa204749bbe0b09f339893217918debe2..54756722dd5f74c000fde0fb1661eec92fe512c0 100644 (file)
@@ -82,7 +82,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include "scsi.h"
 #include <scsi/scsi_host.h>
 #include "dtc.h"
index 41b05fc45380807b897eae325f2429b133303e66..72794a7b6dcc5557bf5b3c5d4b67164e44da573d 100644 (file)
 #include <linux/pci.h>
 #include <linux/stat.h>
 #include <linux/delay.h>
+#include <linux/io.h>
 #include <scsi/scsicam.h>
 
-#include <asm/io.h>
 #include <asm/system.h>
 
 #include <scsi/scsi.h>
index c0edb662d863e232a9f45d7fcb6b431f00a985b3..7bac86dda88f9fdc053105ae149e9c9753125421 100644 (file)
@@ -884,7 +884,7 @@ megaraid_init_mbox(adapter_t *adapter)
 
        if (((magic64 == HBA_SIGNATURE_64_BIT) &&
                ((adapter->pdev->subsystem_device !=
-               PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
+               PCI_SUBSYS_ID_MEGARAID_SATA_150_6) &&
                (adapter->pdev->subsystem_device !=
                PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
                (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
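
The operator change above fixes an always-true subcondition: for a
single subsystem_device value, "id != A || id != B" holds whenever A
and B differ, since no id can equal both at once. The intended
"neither A nor B" test needs &&, as in this minimal sketch (the
if-body is hypothetical):

        u16 id = adapter->pdev->subsystem_device;

        /* true only when the board is neither SATA 150-6 nor 150-4 */
        if (id != PCI_SUBSYS_ID_MEGARAID_SATA_150_6 &&
            id != PCI_SUBSYS_ID_MEGARAID_SATA_150_4)
                handle_64bit_signature_board();   /* hypothetical */
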
index 8ff1f2866f7bf0a8069ff3a0d425bd2a77f99c89..5ffec2721b28566738b6d620305cd18887fcde38 100644 (file)
@@ -97,8 +97,8 @@
 #include <linux/blkdev.h>
 #include <linux/stat.h>
 #include <linux/delay.h>
+#include <linux/io.h>
 
-#include <asm/io.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
index 2df6747cb76fb5ce68a410b42ed748513ab5b80f..0b7a70f61e0d973112c251bd74087b635f26c358 100644 (file)
 #include <asm/system.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/stat.h>
index 331e1cf159b05169b5e05af7487591f5b916b1db..30be76514c43c50e97942d58d390758bf612c7d7 100644 (file)
 #include <linux/blkdev.h>
 #include <linux/init.h>
 #include <linux/stat.h>
+#include <linux/io.h>
 
 #include <asm/system.h>
 #include <asm/dma.h>
-#include <asm/io.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
index 98ce88d802075b74d01f96b6259149e23958db1e..ff4fa25f9fd12cdac405130e096c2200d9eb40fc 100644 (file)
@@ -2935,7 +2935,7 @@ static void __devexit ioc4_serial_exit(void)
        uart_unregister_driver(&ioc4_uart_rs422);
 }
 
-module_init(ioc4_serial_init);
+late_initcall(ioc4_serial_init); /* Call only after tty init is done */
 module_exit(ioc4_serial_exit);
 
 MODULE_AUTHOR("Pat Gefre - Silicon Graphics Inc. (SGI) <pfg@sgi.com>");
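
A note on the initcall change: when the driver is built as a module,
late_initcall() and module_init() behave identically, so the change
only affects the built-in case, where initcall levels impose an
ordering that link order otherwise would not:

        /* built-in initcall levels run strictly in order:
         * device_initcall()  - level 6, where module_init() lands
         * late_initcall()    - level 7, after every device_initcall()
         */
        late_initcall(ioc4_serial_init);   /* now after the tty core init */
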
index 266aa325569e0f4a3e819063b10c9b049c0dc84c..cfcc3caf49d8f5a0a71997ee1bb6b1833035e6ac 100644 (file)
@@ -808,7 +808,7 @@ static int sci_request_irq(struct sci_port *port)
                }
 
                if (request_irq(port->irqs[0], sci_mpxed_interrupt,
-                               SA_INTERRUPT, "sci", port)) {
+                               IRQF_DISABLED, "sci", port)) {
                        printk(KERN_ERR "sci: Cannot allocate irq.\n");
                        return -ENODEV;
                }
@@ -817,7 +817,7 @@ static int sci_request_irq(struct sci_port *port)
                        if (!port->irqs[i])
                                continue;
                        if (request_irq(port->irqs[i], handlers[i],
-                                       SA_INTERRUPT, desc[i], port)) {
+                                       IRQF_DISABLED, desc[i], port)) {
                                printk(KERN_ERR "sci: Cannot allocate irq.\n");
                                return -ENODEV;
                        }
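
SA_INTERRUPT was the old name for the flag that keeps local interrupts disabled while the handler runs; it became IRQF_DISABLED when the SA_* IRQ flags moved to the IRQF_* namespace, with no change in behavior. A minimal request under the new name, as a sketch (device name and IRQ number are placeholders):

    #include <linux/interrupt.h>

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int example_attach(void *dev, int irq /* placeholder */)
    {
            /* IRQF_DISABLED == old SA_INTERRUPT: handler runs with IRQs off. */
            if (request_irq(irq, example_isr, IRQF_DISABLED, "example", dev))
                    return -ENODEV;
            return 0;
    }
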
index 73dd2eedaaad5cff1441c6a6e1bd0923641d1677..b2cc703b2b9e976e6fc74d0f9f652a7960612c4e 100644 (file)
@@ -1182,7 +1182,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
        return 0;
 }
 
-static struct console sunzilog_console = {
+static struct console sunzilog_console_ops = {
        .name   =       "ttyS",
        .write  =       sunzilog_console_write,
        .device =       uart_console_device,
@@ -1208,10 +1208,10 @@ static inline struct console *SUNZILOG_CONSOLE(void)
        if (i == NUM_CHANNELS)
                return NULL;
 
-       sunzilog_console.index = i;
+       sunzilog_console_ops.index = i;
        sunzilog_port_table[i].flags |= SUNZILOG_FLAG_IS_CONS;
 
-       return &sunzilog_console;
+       return &sunzilog_console_ops;
 }
 
 #else
index a3473162587745dc276f632537bd60cdd9732e4f..c66ba9ad833df2d4f4dd141e2246d9fb6ccdd901 100644 (file)
@@ -5,19 +5,6 @@
 menu "SN Devices"
        depends on SGI_SN
 
-config SGI_IOC4
-       tristate "SGI IOC4 Base IO support"
-       depends on MMTIMER
-       default m
-       ---help---
-       This option enables basic support for the SGI IOC4-based Base IO
-       controller card.  This option does not enable any specific
-       functions on such a card, but provides necessary infrastructure
-       for other drivers to utilize.
-
-       If you have an SGI Altix with an IOC4-based
-       I/O controller say Y.  Otherwise say N.
-
 config SGI_IOC3
        tristate "SGI IOC3 Base IO support"
        default m
index 2cda011597c0e018ea931a2393f59526effeba9a..693db8bb8d9c0659ceb94a0ae98c81bd6bc9f423 100644 (file)
@@ -3,5 +3,4 @@
 #
 #
 
-obj-$(CONFIG_SGI_IOC4) += ioc4.o
 obj-$(CONFIG_SGI_IOC3) += ioc3.o
diff --git a/drivers/sn/ioc4.c b/drivers/sn/ioc4.c
deleted file mode 100644 (file)
index 8562821..0000000
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005 Silicon Graphics, Inc.  All Rights Reserved.
- */
-
-/* This file contains the master driver module for use by SGI IOC4 subdrivers.
- *
- * It allocates any resources shared between multiple subdevices, and
- * provides accessor functions (where needed) and the like for those
- * resources.  It also provides a mechanism for the subdevice modules
- * to support loading and unloading.
- *
- * Non-shared resources (e.g. external interrupt A_INT_OUT register page
- * alias, serial port and UART registers) are handled by the subdevice
- * modules themselves.
- *
- * This is all necessary because IOC4 is not implemented as a multi-function
- * PCI device, but an amalgamation of disparate registers for several
- * types of device (ATA, serial, external interrupts).  The normal
- * resource management in the kernel doesn't have quite the right interfaces
- * to handle this situation (e.g. multiple modules can't claim the same
- * PCI ID), thus this IOC4 master module.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/ioc4.h>
-#include <linux/mmtimer.h>
-#include <linux/rtc.h>
-#include <linux/mutex.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/shub_mmr.h>
-
-/***************
- * Definitions *
- ***************/
-
-/* Tweakable values */
-
-/* PCI bus speed detection/calibration */
-#define IOC4_CALIBRATE_COUNT 63        /* Calibration cycle period */
-#define IOC4_CALIBRATE_CYCLES 256      /* Average over this many cycles */
-#define IOC4_CALIBRATE_DISCARD 2       /* Discard first few cycles */
-#define IOC4_CALIBRATE_LOW_MHZ 25      /* Lower bound on bus speed sanity */
-#define IOC4_CALIBRATE_HIGH_MHZ 75     /* Upper bound on bus speed sanity */
-#define IOC4_CALIBRATE_DEFAULT_MHZ 66  /* Assumed if sanity check fails */
-
-/************************
- * Submodule management *
- ************************/
-
-static DEFINE_MUTEX(ioc4_mutex);
-
-static LIST_HEAD(ioc4_devices);
-static LIST_HEAD(ioc4_submodules);
-
-/* Register an IOC4 submodule */
-int
-ioc4_register_submodule(struct ioc4_submodule *is)
-{
-       struct ioc4_driver_data *idd;
-
-       mutex_lock(&ioc4_mutex);
-       list_add(&is->is_list, &ioc4_submodules);
-
-       /* Initialize submodule for each IOC4 */
-       if (!is->is_probe)
-               goto out;
-
-       list_for_each_entry(idd, &ioc4_devices, idd_list) {
-               if (is->is_probe(idd)) {
-                       printk(KERN_WARNING
-                              "%s: IOC4 submodule %s probe failed "
-                              "for pci_dev %s",
-                              __FUNCTION__, module_name(is->is_owner),
-                              pci_name(idd->idd_pdev));
-               }
-       }
- out:
-       mutex_unlock(&ioc4_mutex);
-       return 0;
-}
-
-/* Unregister an IOC4 submodule */
-void
-ioc4_unregister_submodule(struct ioc4_submodule *is)
-{
-       struct ioc4_driver_data *idd;
-
-       mutex_lock(&ioc4_mutex);
-       list_del(&is->is_list);
-
-       /* Remove submodule for each IOC4 */
-       if (!is->is_remove)
-               goto out;
-
-       list_for_each_entry(idd, &ioc4_devices, idd_list) {
-               if (is->is_remove(idd)) {
-                       printk(KERN_WARNING
-                              "%s: IOC4 submodule %s remove failed "
-                              "for pci_dev %s.\n",
-                              __FUNCTION__, module_name(is->is_owner),
-                              pci_name(idd->idd_pdev));
-               }
-       }
- out:
-       mutex_unlock(&ioc4_mutex);
-}
-
-/*********************
- * Device management *
- *********************/
-
-#define IOC4_CALIBRATE_LOW_LIMIT \
-       (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_LOW_MHZ)
-#define IOC4_CALIBRATE_HIGH_LIMIT \
-       (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_HIGH_MHZ)
-#define IOC4_CALIBRATE_DEFAULT \
-       (1000*IOC4_EXTINT_COUNT_DIVISOR/IOC4_CALIBRATE_DEFAULT_MHZ)
-
-#define IOC4_CALIBRATE_END \
-       (IOC4_CALIBRATE_CYCLES + IOC4_CALIBRATE_DISCARD)
-
-#define IOC4_INT_OUT_MODE_TOGGLE 0x7   /* Toggle INT_OUT every COUNT+1 ticks */
-
-/* Determines external interrupt output clock period of the PCI bus an
- * IOC4 is attached to.  This value can be used to determine the PCI
- * bus speed.
- *
- * IOC4 has a design feature that various internal timers are derived from
- * the PCI bus clock.  This causes IOC4 device drivers to need to take the
- * bus speed into account when setting various register values (e.g. INT_OUT
- * register COUNT field, UART divisors, etc).  Since this information is
- * needed by several subdrivers, it is determined by the main IOC4 driver,
- * even though the following code utilizes external interrupt registers
- * to perform the speed calculation.
- */
-static void
-ioc4_clock_calibrate(struct ioc4_driver_data *idd)
-{
-       extern unsigned long sn_rtc_cycles_per_second;
-       union ioc4_int_out int_out;
-       union ioc4_gpcr gpcr;
-       unsigned int state, last_state = 1;
-       uint64_t start = 0, end, period;
-       unsigned int count = 0;
-
-       /* Enable output */
-       gpcr.raw = 0;
-       gpcr.fields.dir = IOC4_GPCR_DIR_0;
-       gpcr.fields.int_out_en = 1;
-       writel(gpcr.raw, &idd->idd_misc_regs->gpcr_s.raw);
-
-       /* Reset to power-on state */
-       writel(0, &idd->idd_misc_regs->int_out.raw);
-       mmiowb();
-
-       /* Set up square wave */
-       int_out.raw = 0;
-       int_out.fields.count = IOC4_CALIBRATE_COUNT;
-       int_out.fields.mode = IOC4_INT_OUT_MODE_TOGGLE;
-       int_out.fields.diag = 0;
-       writel(int_out.raw, &idd->idd_misc_regs->int_out.raw);
-       mmiowb();
-
-       /* Check square wave period averaged over some number of cycles */
-       do {
-               int_out.raw = readl(&idd->idd_misc_regs->int_out.raw);
-               state = int_out.fields.int_out;
-               if (!last_state && state) {
-                       count++;
-                       if (count == IOC4_CALIBRATE_END) {
-                               end = rtc_time();
-                               break;
-                       } else if (count == IOC4_CALIBRATE_DISCARD)
-                               start = rtc_time();
-               }
-               last_state = state;
-       } while (1);
-
-       /* Calculation rearranged to preserve intermediate precision.
-        * Logically:
-        * 1. "end - start" gives us number of RTC cycles over all the
-        *    square wave cycles measured.
-        * 2. Divide by number of square wave cycles to get number of
-        *    RTC cycles per square wave cycle.
-        * 3. Divide by 2*(int_out.fields.count+1), which is the formula
-        *    by which the IOC4 generates the square wave, to get the
-        *    number of RTC cycles per IOC4 INT_OUT count.
-        * 4. Divide by sn_rtc_cycles_per_second to get seconds per
-        *    count.
-        * 5. Multiply by 1E9 to get nanoseconds per count.
-        */
-       period = ((end - start) * 1000000000) /
-           (IOC4_CALIBRATE_CYCLES * 2 * (IOC4_CALIBRATE_COUNT + 1)
-            * sn_rtc_cycles_per_second);
-
-       /* Bounds check the result. */
-       if (period > IOC4_CALIBRATE_LOW_LIMIT ||
-           period < IOC4_CALIBRATE_HIGH_LIMIT) {
-               printk(KERN_INFO
-                      "IOC4 %s: Clock calibration failed.  Assuming "
-                      "PCI clock is %d ns.\n",
-                      pci_name(idd->idd_pdev),
-                      IOC4_CALIBRATE_DEFAULT / IOC4_EXTINT_COUNT_DIVISOR);
-               period = IOC4_CALIBRATE_DEFAULT;
-       } else {
-               printk(KERN_DEBUG
-                      "IOC4 %s: PCI clock is %ld ns.\n",
-                      pci_name(idd->idd_pdev),
-                      period / IOC4_EXTINT_COUNT_DIVISOR);
-       }
-
-       /* Remember results.  We store the extint clock period rather
-        * than the PCI clock period so that greater precision is
-        * retained.  Divide by IOC4_EXTINT_COUNT_DIVISOR to get
-        * PCI clock period.
-        */
-       idd->count_period = period;
-}
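
The calibration comment above packs several unit conversions into one expression; substituting the driver's own constants makes it concrete. A worked example under assumed figures (the 50 MHz RTC rate and the measured delta of 491520 RTC cycles are illustrative, not values from hardware):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t rtc_hz  = 50000000ULL;    /* assumed sn_rtc_cycles_per_second */
            uint64_t elapsed = 491520ULL;      /* assumed "end - start", RTC cycles */
            uint64_t cycles = 256, count = 63; /* IOC4_CALIBRATE_{CYCLES,COUNT} */

            /* Each square-wave cycle spans 2*(count+1) INT_OUT ticks. */
            uint64_t ticks = cycles * 2 * (count + 1);            /* 32768 */
            uint64_t ns = elapsed * 1000000000ULL / (ticks * rtc_hz);

            printf("%llu ns per INT_OUT count\n",
                   (unsigned long long)ns);                       /* 300 here */
            return 0;
    }

The stored value is this per-count period; dividing by IOC4_EXTINT_COUNT_DIVISOR, as the closing comment notes, yields the PCI clock period.
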
-
-/* There are three variants of IOC4 cards: IO9, IO10, and PCI-RT.
- * Each brings out different combinations of IOC4 signals, thus
- * the IOC4 subdrivers need to know to which variant we're attached.
- *
- * We look for the presence of a SCSI (IO9) or SATA (IO10) controller
- * on the same PCI bus at slot number 3 to differentiate IO9 from IO10.
- * If neither is present, it's a PCI-RT.
- */
-static unsigned int
-ioc4_variant(struct ioc4_driver_data *idd)
-{
-       struct pci_dev *pdev = NULL;
-       int found = 0;
-
-       /* IO9: Look for a QLogic ISP 12160 at the same bus and slot 3. */
-       do {
-               pdev = pci_get_device(PCI_VENDOR_ID_QLOGIC,
-                                     PCI_DEVICE_ID_QLOGIC_ISP12160, pdev);
-               if (pdev &&
-                   idd->idd_pdev->bus->number == pdev->bus->number &&
-                   3 == PCI_SLOT(pdev->devfn))
-                       found = 1;
-               pci_dev_put(pdev);
-       } while (pdev && !found);
-       if (NULL != pdev)
-               return IOC4_VARIANT_IO9;
-
-       /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
-       pdev = NULL;
-       do {
-               pdev = pci_get_device(PCI_VENDOR_ID_VITESSE,
-                                     PCI_DEVICE_ID_VITESSE_VSC7174, pdev);
-               if (pdev &&
-                   idd->idd_pdev->bus->number == pdev->bus->number &&
-                   3 == PCI_SLOT(pdev->devfn))
-                       found = 1;
-               pci_dev_put(pdev);
-       } while (pdev && !found);
-       if (NULL != pdev)
-               return IOC4_VARIANT_IO10;
-
-       /* PCI-RT: No SCSI/SATA controller will be present */
-       return IOC4_VARIANT_PCI_RT;
-}
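
The two scan loops removed above lean on the pci_get_device() contract: passing the previous hit as the third argument resumes the walk, the returned device carries an elevated reference count, and the function itself drops the reference on the device it was handed. A condensed sketch of the same bus-and-slot scan (the slot-3 test mirrors the code above):

    #include <linux/pci.h>

    /* Returns 1 if vendor:device sits in slot 3 of the given bus. */
    static int example_find_in_slot3(unsigned int vendor, unsigned int device,
                                     unsigned char busno)
    {
            struct pci_dev *pdev = NULL;

            while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
                    if (pdev->bus->number == busno &&
                        PCI_SLOT(pdev->devfn) == 3) {
                            pci_dev_put(pdev);  /* drop the ref we still hold */
                            return 1;
                    }
            }
            return 0;   /* loop exit with pdev == NULL: nothing to put */
    }
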
-
-/* Adds a new instance of an IOC4 card */
-static int
-ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
-{
-       struct ioc4_driver_data *idd;
-       struct ioc4_submodule *is;
-       uint32_t pcmd;
-       int ret;
-
-       /* Enable IOC4 and take ownership of it */
-       if ((ret = pci_enable_device(pdev))) {
-               printk(KERN_WARNING
-                      "%s: Failed to enable IOC4 device for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(pdev));
-               goto out;
-       }
-       pci_set_master(pdev);
-
-       /* Set up per-IOC4 data */
-       idd = kmalloc(sizeof(struct ioc4_driver_data), GFP_KERNEL);
-       if (!idd) {
-               printk(KERN_WARNING
-                      "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(pdev));
-               ret = -ENODEV;
-               goto out_idd;
-       }
-       idd->idd_pdev = pdev;
-       idd->idd_pci_id = pci_id;
-
-       /* Map IOC4 misc registers.  These are shared between subdevices
-        * so the main IOC4 module manages them.
-        */
-       idd->idd_bar0 = pci_resource_start(idd->idd_pdev, 0);
-       if (!idd->idd_bar0) {
-               printk(KERN_WARNING
-                      "%s: Unable to find IOC4 misc resource "
-                      "for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
-               ret = -ENODEV;
-               goto out_pci;
-       }
-       if (!request_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs),
-                           "ioc4_misc")) {
-               printk(KERN_WARNING
-                      "%s: Unable to request IOC4 misc region "
-                      "for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
-               ret = -ENODEV;
-               goto out_pci;
-       }
-       idd->idd_misc_regs = ioremap(idd->idd_bar0,
-                                    sizeof(struct ioc4_misc_regs));
-       if (!idd->idd_misc_regs) {
-               printk(KERN_WARNING
-                      "%s: Unable to remap IOC4 misc region "
-                      "for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
-               ret = -ENODEV;
-               goto out_misc_region;
-       }
-
-       /* Failsafe portion of per-IOC4 initialization */
-
-       /* Detect card variant */
-       idd->idd_variant = ioc4_variant(idd);
-       printk(KERN_INFO "IOC4 %s: %s card detected.\n", pci_name(pdev),
-              idd->idd_variant == IOC4_VARIANT_IO9 ? "IO9" :
-              idd->idd_variant == IOC4_VARIANT_PCI_RT ? "PCI-RT" :
-              idd->idd_variant == IOC4_VARIANT_IO10 ? "IO10" : "unknown");
-
-       /* Initialize IOC4 */
-       pci_read_config_dword(idd->idd_pdev, PCI_COMMAND, &pcmd);
-       pci_write_config_dword(idd->idd_pdev, PCI_COMMAND,
-                              pcmd | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
-
-       /* Determine PCI clock */
-       ioc4_clock_calibrate(idd);
-
-       /* Disable/clear all interrupts.  Need to do this here lest
-        * one submodule request the shared IOC4 IRQ, but interrupt
-        * is generated by a different subdevice.
-        */
-       /* Disable */
-       writel(~0, &idd->idd_misc_regs->other_iec.raw);
-       writel(~0, &idd->idd_misc_regs->sio_iec);
-       /* Clear (i.e. acknowledge) */
-       writel(~0, &idd->idd_misc_regs->other_ir.raw);
-       writel(~0, &idd->idd_misc_regs->sio_ir);
-
-       /* Track PCI-device specific data */
-       idd->idd_serial_data = NULL;
-       pci_set_drvdata(idd->idd_pdev, idd);
-
-       mutex_lock(&ioc4_mutex);
-       list_add_tail(&idd->idd_list, &ioc4_devices);
-
-       /* Add this IOC4 to all submodules */
-       list_for_each_entry(is, &ioc4_submodules, is_list) {
-               if (is->is_probe && is->is_probe(idd)) {
-                       printk(KERN_WARNING
-                              "%s: IOC4 submodule 0x%s probe failed "
-                              "for pci_dev %s.\n",
-                              __FUNCTION__, module_name(is->is_owner),
-                              pci_name(idd->idd_pdev));
-               }
-       }
-       mutex_unlock(&ioc4_mutex);
-
-       return 0;
-
-out_misc_region:
-       release_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
-out_pci:
-       kfree(idd);
-out_idd:
-       pci_disable_device(pdev);
-out:
-       return ret;
-}
-
-/* Removes a particular instance of an IOC4 card. */
-static void
-ioc4_remove(struct pci_dev *pdev)
-{
-       struct ioc4_submodule *is;
-       struct ioc4_driver_data *idd;
-
-       idd = pci_get_drvdata(pdev);
-
-       /* Remove this IOC4 from all submodules */
-       mutex_lock(&ioc4_mutex);
-       list_for_each_entry(is, &ioc4_submodules, is_list) {
-               if (is->is_remove && is->is_remove(idd)) {
-                       printk(KERN_WARNING
-                              "%s: IOC4 submodule 0x%s remove failed "
-                              "for pci_dev %s.\n",
-                              __FUNCTION__, module_name(is->is_owner),
-                              pci_name(idd->idd_pdev));
-               }
-       }
-       mutex_unlock(&ioc4_mutex);
-
-       /* Release resources */
-       iounmap(idd->idd_misc_regs);
-       if (!idd->idd_bar0) {
-               printk(KERN_WARNING
-                      "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
-                      "Device removal may be incomplete.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
-       }
-       release_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
-
-       /* Disable IOC4 and relinquish */
-       pci_disable_device(pdev);
-
-       /* Remove and free driver data */
-       mutex_lock(&ioc4_mutex);
-       list_del(&idd->idd_list);
-       mutex_unlock(&ioc4_mutex);
-       kfree(idd);
-}
-
-static struct pci_device_id ioc4_id_table[] = {
-       {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC4, PCI_ANY_ID,
-        PCI_ANY_ID, 0x0b4000, 0xFFFFFF},
-       {0}
-};
-
-static struct pci_driver ioc4_driver = {
-       .name = "IOC4",
-       .id_table = ioc4_id_table,
-       .probe = ioc4_probe,
-       .remove = ioc4_remove,
-};
-
-MODULE_DEVICE_TABLE(pci, ioc4_id_table);
-
-/*********************
- * Module management *
- *********************/
-
-/* Module load */
-static int __devinit
-ioc4_init(void)
-{
-       return pci_register_driver(&ioc4_driver);
-}
-
-/* Module unload */
-static void __devexit
-ioc4_exit(void)
-{
-       pci_unregister_driver(&ioc4_driver);
-}
-
-module_init(ioc4_init);
-module_exit(ioc4_exit);
-
-MODULE_AUTHOR("Brent Casavant - Silicon Graphics, Inc. <bcasavan@sgi.com>");
-MODULE_DESCRIPTION("PCI driver master module for SGI IOC4 Base-IO Card");
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(ioc4_register_submodule);
-EXPORT_SYMBOL(ioc4_unregister_submodule);
index daaa486159cf37ae64d5329150466bf839272bdc..7a43020fa5835136ced2b4826ac3cffdd7823efb 100644 (file)
@@ -701,7 +701,6 @@ config FB_NVIDIA
        depends on FB && PCI
        select I2C_ALGOBIT if FB_NVIDIA_I2C
        select I2C if FB_NVIDIA_I2C
-       select FB_DDC if FB_NVIDIA_I2C
        select FB_MODE_HELPERS
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
index e48de3c9fd13ff0ee1f35df67a85281a719e26fd..19eef3a090232854860ad993365993dc7e5f68f6 100644 (file)
@@ -160,12 +160,51 @@ void nvidia_delete_i2c_busses(struct nvidia_par *par)
 
 }
 
+static u8 *nvidia_do_probe_i2c_edid(struct nvidia_i2c_chan *chan)
+{
+       u8 start = 0x0;
+       struct i2c_msg msgs[] = {
+               {
+                .addr = 0x50,
+                .len = 1,
+                .buf = &start,
+                }, {
+                    .addr = 0x50,
+                    .flags = I2C_M_RD,
+                    .len = EDID_LENGTH,
+                    },
+       };
+       u8 *buf;
+
+       if (!chan->par)
+               return NULL;
+
+       buf = kmalloc(EDID_LENGTH, GFP_KERNEL);
+       if (!buf) {
+               dev_warn(&chan->par->pci_dev->dev, "Out of memory!\n");
+               return NULL;
+       }
+       msgs[1].buf = buf;
+
+       if (i2c_transfer(&chan->adapter, msgs, 2) == 2)
+               return buf;
+       dev_dbg(&chan->par->pci_dev->dev, "Unable to read EDID block.\n");
+       kfree(buf);
+       return NULL;
+}
+
 int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid)
 {
        struct nvidia_par *par = info->par;
-       u8 *edid;
-
-       edid = fb_ddc_read(&par->chan[conn - 1].adapter);
+       u8 *edid = NULL;
+       int i;
+
+       for (i = 0; i < 3; i++) {
+               /* Do the real work */
+               edid = nvidia_do_probe_i2c_edid(&par->chan[conn - 1]);
+               if (edid)
+                       break;
+       }
 
        if (!edid && conn == 1) {
                /* try to get from firmware */
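
The replacement probe above is the canonical DDC transaction: one i2c message writes a zero byte to set the EDID EEPROM's internal pointer, a second reads EDID_LENGTH (128) bytes back, both at the fixed DDC slave address 0x50. i2c_transfer() returns the number of messages completed, so a return of 2 means both legs succeeded, and the caller retries up to three times because DDC reads fail transiently on marginal cables. Reduced to a self-contained sketch (adapter and 128-byte buffer supplied by the caller):

    #include <linux/i2c.h>

    /* Returns buf on success, NULL on a failed transfer. */
    static u8 *example_ddc_read(struct i2c_adapter *adap, u8 *buf)
    {
            u8 start = 0;
            struct i2c_msg msgs[2] = {
                    { .addr = 0x50, .len = 1, .buf = &start },   /* set offset 0 */
                    { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = buf },
            };

            return i2c_transfer(adap, msgs, 2) == 2 ? buf : NULL;
    }
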
index 27c9d05d03ef23f1280754fab152393d6995bb14..c287a9ae4fdd56690444c44a2f6e57269e50cb5a 100644 (file)
@@ -2,7 +2,6 @@ menu "Dallas's 1-wire bus"
 
 config W1
        tristate "Dallas's 1-wire support"
-       depends on CONNECTOR
        ---help---
          Dallas' 1-wire bus is useful to connect slow 1-pin devices
          such as iButtons and thermal sensors.
index 599de54451af61c3297e4d18b068e5799b36e4b2..6a3df055280a01858d5925f491bcc33157498aab 100644 (file)
@@ -140,6 +140,73 @@ config EXT3_FS_SECURITY
          If you are not using a security module that requires using
          extended attributes for file security labels, say N.
 
+config EXT4DEV_FS
+       tristate "Ext4dev/ext4 extended fs support development (EXPERIMENTAL)"
+       depends on EXPERIMENTAL
+       select JBD2
+       help
+         Ext4dev is a predecessor filesystem of the next generation
+         extended fs ext4, based on ext3 filesystem code. It will be
+         renamed ext4 fs later, once ext4dev is mature and stabilized.
+
+         Unlike the change from ext2 filesystem to ext3 filesystem,
+         the on-disk format of ext4dev is not the same as ext3 any more:
+         it is based on extent maps and it supports 48-bit physical block
+         numbers. These combined on-disk format changes will allow
+         ext4dev/ext4 to handle more than 16 TB filesystem volumes --
+         a hard limit that ext3 cannot overcome without changing the
+         on-disk format.
+
+         Other than extent maps and 48-bit block numbers, ext4dev is also
+         likely to gain other new features such as persistent preallocation,
+         high-resolution timestamps, and larger file support.  These
+         features will be added to ext4dev gradually.
+
+         To compile this file system support as a module, choose M here. The
+         module will be called ext4dev.  Be aware, however, that the filesystem
+         of your root partition (the one containing the directory /) cannot
+         be compiled as a module, and so this could be dangerous.
+
+         If unsure, say N.
+
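
The 16 TB ceiling cited in the help text above falls straight out of ext3's 32-bit block numbers, and the new 48-bit numbers move it far out of reach. The arithmetic, assuming the common 4 KiB block size:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t block = 4096;                        /* assumed 4 KiB blocks */
            uint64_t ext3_bytes = (1ULL << 32) * block;   /* 2^44 bytes */
            uint64_t ext4_bytes = (1ULL << 48) * block;   /* 2^60 bytes */

            printf("ext3:    %llu TiB\n",
                   (unsigned long long)(ext3_bytes >> 40));   /* 16 */
            printf("ext4dev: %llu EiB\n",
                   (unsigned long long)(ext4_bytes >> 60));   /* 1  */
            return 0;
    }
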
+config EXT4DEV_FS_XATTR
+       bool "Ext4dev extended attributes"
+       depends on EXT4DEV_FS
+       default y
+       help
+         Extended attributes are name:value pairs associated with inodes by
+         the kernel or by users (see the attr(5) manual page, or visit
+         <http://acl.bestbits.at/> for details).
+
+         If unsure, say N.
+
+         You need this for POSIX ACL support on ext4dev/ext4.
+
+config EXT4DEV_FS_POSIX_ACL
+       bool "Ext4dev POSIX Access Control Lists"
+       depends on EXT4DEV_FS_XATTR
+       select FS_POSIX_ACL
+       help
+         POSIX Access Control Lists (ACLs) support permissions for users and
+         groups beyond the owner/group/world scheme.
+
+         To learn more about Access Control Lists, visit the POSIX ACLs for
+         Linux website <http://acl.bestbits.at/>.
+
+         If you don't know what Access Control Lists are, say N.
+
+config EXT4DEV_FS_SECURITY
+       bool "Ext4dev Security Labels"
+       depends on EXT4DEV_FS_XATTR
+       help
+         Security labels support alternative access control models
+         implemented by security modules like SELinux.  This option
+         enables an extended attribute handler for file security
+         labels in the ext4dev/ext4 filesystem.
+
+         If you are not using a security module that requires using
+         extended attributes for file security labels, say N.
+
 config JBD
        tristate
        help
@@ -172,12 +239,44 @@ config JBD_DEBUG
          generated.  To turn debugging off again, do
          "echo 0 > /proc/sys/fs/jbd-debug".
 
+config JBD2
+       tristate
+       help
+         This is a generic journaling layer for block devices that support
+         both 32-bit and 64-bit block numbers.  It is currently used by
+         the ext4dev/ext4 filesystem, but it could also be used to add
+         journal support to other file systems or block devices such
+         as RAID or LVM.
+
+         If you are using ext4dev/ext4, you need to say Y here. If you are not
+         using ext4dev/ext4 then you will probably want to say N.
+
+         To compile this device as a module, choose M here. The module will be
+         called jbd2.  If you are compiling ext4dev/ext4 into the kernel,
+         you cannot compile this code as a module.
+
+config JBD2_DEBUG
+       bool "JBD2 (ext4dev/ext4) debugging support"
+       depends on JBD2
+       help
+         If you are using the ext4dev/ext4 journaled file system (or
+         potentially any other filesystem/device using JBD2), this option
+         allows you to enable debugging output while the system is running,
+         in order to help track down any problems you are having.
+         By default, the debugging output will be turned off.
+
+         If you select Y here, then you will be able to turn on debugging
+         with "echo N > /proc/sys/fs/jbd2-debug", where N is a number between
+         1 and 5. The higher the number, the more debugging output is
+         generated.  To turn debugging off again, do
+         "echo 0 > /proc/sys/fs/jbd2-debug".
+
 config FS_MBCACHE
-# Meta block cache for Extended Attributes (ext2/ext3)
+# Meta block cache for Extended Attributes (ext2/ext3/ext4)
        tristate
-       depends on EXT2_FS_XATTR || EXT3_FS_XATTR
-       default y if EXT2_FS=y || EXT3_FS=y
-       default m if EXT2_FS=m || EXT3_FS=m
+       depends on EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4DEV_FS_XATTR
+       default y if EXT2_FS=y || EXT3_FS=y || EXT4DEV_FS=y
+       default m if EXT2_FS=m || EXT3_FS=m || EXT4DEV_FS=m
 
 config REISERFS_FS
        tristate "Reiserfs support"
@@ -1887,7 +1986,7 @@ config CIFS_EXPERIMENTAL
 config CIFS_UPCALL
          bool "Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
          depends on CIFS_EXPERIMENTAL
-         select CONNECTOR
+         depends on CONNECTOR
          help
            Enables an upcall mechanism for CIFS which will be used to contact
            userspace helper utilities to provide SPNEGO packaged Kerberos
index df614eacee8620c07b15bb3080f65405e4674392..9a5ce9323bfd0c65ffed56f597294b29db75efa9 100644 (file)
@@ -62,7 +62,9 @@ obj-$(CONFIG_DLM)             += dlm/
 # Do not add any filesystems before this line
 obj-$(CONFIG_REISERFS_FS)      += reiserfs/
 obj-$(CONFIG_EXT3_FS)          += ext3/ # Before ext2 so root fs can be ext3
+obj-$(CONFIG_EXT4DEV_FS)       += ext4/ # Before ext2 so root fs can be ext4dev
 obj-$(CONFIG_JBD)              += jbd/
+obj-$(CONFIG_JBD2)             += jbd2/
 obj-$(CONFIG_EXT2_FS)          += ext2/
 obj-$(CONFIG_CRAMFS)           += cramfs/
 obj-$(CONFIG_RAMFS)            += ramfs/
index cf8a2cb2850563d9c9d19741dd2c3b12d367b01a..a6ec75c56fcf76cf44b12f28969180e588eee966 100644 (file)
@@ -211,8 +211,8 @@ static int afs_dir_open(struct inode *inode, struct file *file)
 {
        _enter("{%lu}", inode->i_ino);
 
-       BUG_ON(sizeof(union afs_dir_block) != 2048);
-       BUG_ON(sizeof(union afs_dirent) != 32);
+       BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
+       BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
 
        if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED)
                return -ENOENT;
@@ -446,8 +446,8 @@ static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry,
        _enter("{%lu},%p{%s}", dir->i_ino, dentry, dentry->d_name.name);
 
        /* insanity checks first */
-       BUG_ON(sizeof(union afs_dir_block) != 2048);
-       BUG_ON(sizeof(union afs_dirent) != 32);
+       BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
+       BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
 
        if (dentry->d_name.len > 255) {
                _leave(" = -ENAMETOOLONG");
index 480ab178cba50eca22e35bb3af4d0709c0d37258..b13f32c8aeeea26914163e35e7b45a9782e779eb 100644 (file)
@@ -94,7 +94,6 @@ struct autofs_wait_queue {
 
 struct autofs_sb_info {
        u32 magic;
-       struct dentry *root;
        int pipefd;
        struct file *pipe;
        pid_t oz_pgrp;
@@ -229,4 +228,4 @@ out:
 }
 
 void autofs4_dentry_release(struct dentry *);
-
+extern void autofs4_kill_sb(struct super_block *);
index 5d9193332bef1b9eac190d80fb0fa5f97ff28f8a..723a1c5e361b2786c8f8ec97e9332b77b1484e15 100644 (file)
@@ -24,7 +24,7 @@ static struct file_system_type autofs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "autofs",
        .get_sb         = autofs_get_sb,
-       .kill_sb        = kill_anon_super,
+       .kill_sb        = autofs4_kill_sb,
 };
 
 static int __init init_autofs4_fs(void)
index 800ce876caeca6e33fc427e40d34d89a69923ed3..51fd8595bf85197d252ce3414b73539dd0b9eae8 100644 (file)
@@ -96,7 +96,7 @@ void autofs4_free_ino(struct autofs_info *ino)
  */
 static void autofs4_force_release(struct autofs_sb_info *sbi)
 {
-       struct dentry *this_parent = sbi->root;
+       struct dentry *this_parent = sbi->sb->s_root;
        struct list_head *next;
 
        spin_lock(&dcache_lock);
@@ -127,7 +127,7 @@ resume:
                spin_lock(&dcache_lock);
        }
 
-       if (this_parent != sbi->root) {
+       if (this_parent != sbi->sb->s_root) {
                struct dentry *dentry = this_parent;
 
                next = this_parent->d_u.d_child.next;
@@ -140,15 +140,9 @@ resume:
                goto resume;
        }
        spin_unlock(&dcache_lock);
-
-       dput(sbi->root);
-       sbi->root = NULL;
-       shrink_dcache_sb(sbi->sb);
-
-       return;
 }
 
-static void autofs4_put_super(struct super_block *sb)
+void autofs4_kill_sb(struct super_block *sb)
 {
        struct autofs_sb_info *sbi = autofs4_sbi(sb);
 
@@ -163,6 +157,7 @@ static void autofs4_put_super(struct super_block *sb)
        kfree(sbi);
 
        DPRINTK("shutting down");
+       kill_anon_super(sb);
 }
 
 static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
@@ -189,7 +184,6 @@ static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt)
 }
 
 static struct super_operations autofs4_sops = {
-       .put_super      = autofs4_put_super,
        .statfs         = simple_statfs,
        .show_options   = autofs4_show_options,
 };
@@ -315,7 +309,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
 
        s->s_fs_info = sbi;
        sbi->magic = AUTOFS_SBI_MAGIC;
-       sbi->root = NULL;
        sbi->pipefd = -1;
        sbi->catatonic = 0;
        sbi->exp_timeout = 0;
@@ -396,13 +389,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
        sbi->pipe = pipe;
        sbi->pipefd = pipefd;
 
-       /*
-        * Take a reference to the root dentry so we get a chance to
-        * clean up the dentry tree on umount.
-        * See autofs4_force_release.
-        */
-       sbi->root = dget(root);
-
        /*
         * Success! Install the root dentry now to indicate completion.
         */
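
Switching autofs4 from a ->put_super hook to a ->kill_sb method changes when the teardown runs: kill_sb is called while the superblock and its dentry tree are still intact, before kill_anon_super()/generic_shutdown_super() prune the dcache, which is why the pinned sbi->root reference removed above becomes unnecessary. The adopted shape, reduced to a skeleton:

    #include <linux/fs.h>
    #include <linux/slab.h>

    static void example_kill_sb(struct super_block *sb)
    {
            /* Filesystem-private teardown first, tree still walkable... */
            kfree(sb->s_fs_info);
            sb->s_fs_info = NULL;
            /* ...then the generic shutdown of an anonymous-dev superblock. */
            kill_anon_super(sb);
    }
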
index ce103e7b0bc360f684a2556c55ea9ed8be0e38f2..c0a6c8d445c7b7cffc8a7c5163bd32d7ba6bdbd7 100644 (file)
@@ -45,7 +45,6 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
                fput(sbi->pipe);        /* Close the pipe */
                sbi->pipe = NULL;
        }
-       shrink_dcache_sb(sbi->sb);
 }
 
 static int autofs4_write(struct file *file, const void *addr, int bytes)
index 06435f3665f472f7a1e7ff4879627bff3064c740..79b05a1a436582ebfd2415a682cce4a322be00d5 100644 (file)
@@ -1152,7 +1152,7 @@ static int dump_write(struct file *file, const void *addr, int nr)
 static int dump_seek(struct file *file, loff_t off)
 {
        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
-               if (file->f_op->llseek(file, off, 1) != off)
+               if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
                        return 0;
        } else {
                char *buf = (char *)get_zeroed_page(GFP_KERNEL);
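
The dump_seek() change fixes two things at once: the magic 1 becomes the named SEEK_CUR, and the success test changes because ->llseek() returns the new absolute file position, never the distance moved, so comparing the result against a relative offset could only succeed at position zero. A minimal illustration of the contract:

    #include <linux/fs.h>

    /* Skip 'delta' bytes forward; returns 0 on success. */
    static int example_skip(struct file *file, loff_t delta)
    {
            /* Seeking +40 from position 100 returns 140 (the new position)
             * on success, or a negative errno on failure -- so '< 0' is
             * the only meaningful error test here. */
            return file->f_op->llseek(file, delta, SEEK_CUR) < 0 ? -EIO : 0;
    }
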
@@ -1220,7 +1220,7 @@ static int notesize(struct memelfnote *en)
 
 static int alignfile(struct file *file, loff_t *foffset)
 {
-       char buf[4] = { 0, };
+       static const char buf[4] = { 0, };
        DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
        return 1;
 }
@@ -1569,7 +1569,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
 
        DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
-       offset += (segs+1) * sizeof(struct elf_phdr);   /* Program headers */
+       offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
+       foffset = offset;
 
        /* Write notes phdr entry */
        {
@@ -1586,8 +1587,6 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
                DUMP_WRITE(&phdr, sizeof(phdr));
        }
 
-       foffset = offset;
-
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
        /* Write program headers for segments dump */
@@ -1612,7 +1611,6 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
                phdr.p_align = ELF_EXEC_PAGESIZE;
 
                DUMP_WRITE(&phdr, sizeof(phdr));
-               foffset += sizeof(phdr);
        }
 
 #ifdef ELF_CORE_WRITE_EXTRA_PHDRS
index 8f93e939f21375abe2c205fd2783c319098cc87f..f95c8749499f9db7fe799594161a183ed9896910 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -79,7 +79,6 @@ static struct bio_set *fs_bio_set;
 static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
 {
        struct bio_vec *bvl;
-       struct biovec_slab *bp;
 
        /*
         * see comment near bvec_array define!
@@ -98,10 +97,12 @@ static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned lon
         * idx now points to the pool we want to allocate from
         */
 
-       bp = bvec_slabs + *idx;
        bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-       if (bvl)
+       if (bvl) {
+               struct biovec_slab *bp = bvec_slabs + *idx;
+
                memset(bvl, 0, bp->nr_vecs * sizeof(struct bio_vec));
+       }
 
        return bvl;
 }
@@ -166,7 +167,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 
                bio_init(bio);
                if (likely(nr_iovecs)) {
-                       unsigned long idx;
+                       unsigned long idx = 0; /* shut up gcc */
 
                        bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
                        if (unlikely(!bvl)) {
index eeb8ac1aa8561f241ff007fcf3c4bd943ffffdb8..35527dca1dbcc415d95d76780537c577808df9ff 100644 (file)
@@ -452,6 +452,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                               bdevname(bh->b_bdev, b));
                }
                set_bit(AS_EIO, &page->mapping->flags);
+               set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
        }
@@ -571,6 +572,10 @@ EXPORT_SYMBOL(mark_buffer_async_write);
 static inline void __remove_assoc_queue(struct buffer_head *bh)
 {
        list_del_init(&bh->b_assoc_buffers);
+       WARN_ON(!bh->b_assoc_map);
+       if (buffer_write_io_error(bh))
+               set_bit(AS_EIO, &bh->b_assoc_map->flags);
+       bh->b_assoc_map = NULL;
 }
 
 int inode_has_buffers(struct inode *inode)
@@ -669,6 +674,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
                spin_lock(&buffer_mapping->private_lock);
                list_move_tail(&bh->b_assoc_buffers,
                                &mapping->private_list);
+               bh->b_assoc_map = mapping;
                spin_unlock(&buffer_mapping->private_lock);
        }
 }
@@ -765,7 +771,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
        spin_lock(lock);
        while (!list_empty(list)) {
                bh = BH_ENTRY(list->next);
-               list_del_init(&bh->b_assoc_buffers);
+               __remove_assoc_queue(bh);
                if (buffer_dirty(bh) || buffer_locked(bh)) {
                        list_add(&bh->b_assoc_buffers, &tmp);
                        if (buffer_dirty(bh)) {
@@ -786,7 +792,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 
        while (!list_empty(&tmp)) {
                bh = BH_ENTRY(tmp.prev);
-               __remove_assoc_queue(bh);
+               list_del_init(&bh->b_assoc_buffers);
                get_bh(bh);
                spin_unlock(lock);
                wait_on_buffer(bh);
@@ -1042,8 +1048,21 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        } while ((size << sizebits) < PAGE_SIZE);
 
        index = block >> sizebits;
-       block = index << sizebits;
 
+       /*
+        * Check for a block which wants to lie outside our maximum possible
+        * pagecache index.  (this comparison is done using sector_t types).
+        */
+       if (unlikely(index != block >> sizebits)) {
+               char b[BDEVNAME_SIZE];
+
+               printk(KERN_ERR "%s: requested out-of-range block %llu for "
+                       "device %s\n",
+                       __FUNCTION__, (unsigned long long)block,
+                       bdevname(bdev, b));
+               return -EIO;
+       }
+       block = index << sizebits;
        /* Create a page with the proper size buffers.. */
        page = grow_dev_page(bdev, block, index, size);
        if (!page)
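
The new grow_buffers() check guards a genuine truncation: index is a pgoff_t (an unsigned long, so 32 bits on 32-bit machines) while block is a sector_t that may be 64 bits, so the shift can silently drop high bits on very large devices; shifting back and comparing catches the loss without widening any type. A model of the failure with assumed sizes (4 KiB pages, 512-byte blocks, hence sizebits = 3):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t block = 1ULL << 36;   /* sector beyond a 32-bit page index */
            unsigned sizebits = 3;         /* 8 blocks per 4 KiB page */
            uint32_t index = block >> sizebits;    /* truncates to 0 here */

            /* Round-trip comparison, exactly as the hunk above does. */
            if ((uint64_t)index != (block >> sizebits))
                    printf("block %llu is out of pagecache range\n",
                           (unsigned long long)block);
            return 0;
    }
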
@@ -1070,12 +1089,16 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
        for (;;) {
                struct buffer_head * bh;
+               int ret;
 
                bh = __find_get_block(bdev, block, size);
                if (bh)
                        return bh;
 
-               if (!grow_buffers(bdev, block, size))
+               ret = grow_buffers(bdev, block, size);
+               if (ret < 0)
+                       return NULL;
+               if (ret == 0)
                        free_more_memory();
        }
 }
@@ -1150,6 +1173,7 @@ void __bforget(struct buffer_head *bh)
 
                spin_lock(&buffer_mapping->private_lock);
                list_del_init(&bh->b_assoc_buffers);
+               bh->b_assoc_map = NULL;
                spin_unlock(&buffer_mapping->private_lock);
        }
        __brelse(bh);
@@ -1837,6 +1861,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                        clear_buffer_new(bh);
                        kaddr = kmap_atomic(page, KM_USER0);
                        memset(kaddr+block_start, 0, bh->b_size);
+                       flush_dcache_page(page);
                        kunmap_atomic(kaddr, KM_USER0);
                        set_buffer_uptodate(bh);
                        mark_buffer_dirty(bh);
@@ -2343,6 +2368,7 @@ failed:
         */
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, PAGE_CACHE_SIZE);
+       flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        SetPageUptodate(page);
        set_page_dirty(page);
index d0776ac2b8048938bcc58b0615b52b53e29a3c02..5eff35d6e564ac95550697f7b884a9612ac667ca 100644 (file)
@@ -31,8 +31,8 @@ struct cifs_sid {
 } __attribute__((packed));
 
 /* everyone */
-extern const struct cifs_sid sid_everyone;
+/* extern const struct cifs_sid sid_everyone;*/
 /* group users */
-extern const struct cifs_sid sid_user;
+/* extern const struct cifs_sid sid_user;*/
 
 #endif /* _CIFSACL_H */
index 03e359b3286117922f0a47aad41a9f1be1e6c823..152fa2dcfc6c70e80741f92be2e56ab7e5799226 100644 (file)
@@ -27,8 +27,6 @@ extern void mdfour(unsigned char *out, unsigned char *in, int n);
 /* smbdes.c */
 extern void E_P16(unsigned char *p14, unsigned char *p16);
 extern void E_P24(unsigned char *p21, unsigned char *c8, unsigned char *p24);
-extern void D_P16(unsigned char *p14, unsigned char *in, unsigned char *out);
-extern void E_old_pw_hash(unsigned char *, unsigned char *, unsigned char *);
 
 
 
index c00c654f2e11c0ce9cdbb3d597bb1afe9e673ddb..84976cdbe7136c4b76ad0777c924d44fd4b613cd 100644 (file)
@@ -63,6 +63,7 @@ extern struct task_struct * oplockThread; /* remove sparse warning */
 struct task_struct * oplockThread = NULL;
 extern struct task_struct * dnotifyThread; /* remove sparse warning */
 struct task_struct * dnotifyThread = NULL;
+static struct super_operations cifs_super_ops; 
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 module_param(CIFSMaxBufSize, int, 0);
 MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
@@ -198,10 +199,12 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
     /* Only need to call the old QFSInfo if failed
     on newer one */
     if(rc)
-       rc = CIFSSMBQFSInfo(xid, pTcon, buf);
+       if(pTcon->ses->capabilities & CAP_NT_SMBS)
+               rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */
 
-       /* Old Windows servers do not support level 103, retry with level 
-          one if old server failed the previous call */ 
+       /* Some old Windows servers also do not support level 103; retry with
+          the older level-one call if the server failed the previous call, or
+          if we bypassed it after detecting an older LANMAN session */
        if(rc)
                rc = SMBOldQFSInfo(xid, pTcon, buf);
        /*     
@@ -435,13 +438,21 @@ static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
        return;
 }
 
+#ifdef CONFIG_CIFS_STATS2
+static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
+{
+       /* BB FIXME */
+       return 0;
+}
+#endif
+
 static int cifs_remount(struct super_block *sb, int *flags, char *data)
 {
        *flags |= MS_NODIRATIME;
        return 0;
 }
 
-struct super_operations cifs_super_ops = {
+static struct super_operations cifs_super_ops = {
        .read_inode = cifs_read_inode,
        .put_super = cifs_put_super,
        .statfs = cifs_statfs,
@@ -454,6 +465,9 @@ struct super_operations cifs_super_ops = {
        .show_options = cifs_show_options,
        .umount_begin   = cifs_umount_begin,
        .remount_fs = cifs_remount,
+#ifdef CONFIG_CIFS_STATS2
+       .show_stats = cifs_show_stats,
+#endif
 };
 
 static int
@@ -495,7 +509,7 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
 {
        /* origin == SEEK_END => we must revalidate the cached file length */
-       if (origin == 2) {
+       if (origin == SEEK_END) {
                int retval = cifs_revalidate(file->f_dentry);
                if (retval < 0)
                        return (loff_t)retval;
@@ -903,7 +917,7 @@ init_cifs(void)
 #ifdef CONFIG_PROC_FS
        cifs_proc_init();
 #endif
-       INIT_LIST_HEAD(&GlobalServerList);      /* BB not implemented yet */
+/*     INIT_LIST_HEAD(&GlobalServerList);*/    /* BB not implemented yet */
        INIT_LIST_HEAD(&GlobalSMBSessionList);
        INIT_LIST_HEAD(&GlobalTreeConnectionList);
        INIT_LIST_HEAD(&GlobalOplock_Q);
@@ -931,6 +945,7 @@ init_cifs(void)
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
        GlobalMaxActiveXid = 0;
+       memset(Local_System_Name, 0, 15);
        rwlock_init(&GlobalSMBSeslock);
        spin_lock_init(&GlobalMid_Lock);
 
index bea875d9a46acda0578f7c92f7c84232824a0ff0..a243f779b363a9a9cff261f65f23027dc9e30c15 100644 (file)
@@ -36,7 +36,7 @@ extern const struct address_space_operations cifs_addr_ops;
 extern const struct address_space_operations cifs_addr_ops_smallbuf;
 
 /* Functions related to super block operations */
-extern struct super_operations cifs_super_ops;
+/* extern struct super_operations cifs_super_ops;*/
 extern void cifs_read_inode(struct inode *);
 extern void cifs_delete_inode(struct inode *);
 /* extern void cifs_write_inode(struct inode *); *//* BB not needed yet */
index b24006c47df10b720cf2d05e98bda80c124c19c2..74d3ccbb103bfdbaf82b56cb03272ceebf9c022e 100644 (file)
@@ -153,7 +153,7 @@ struct TCP_Server_Info {
        char sessid[4];         /* unique token id for this session */
        /* (returned on Negotiate */
        int capabilities; /* allow selective disabling of caps by smb sess */
-       __u16 timeZone;
+       int timeAdj;  /* Adjust for difference in server time zone in sec */
        __u16 CurrentMid;         /* multiplex id - rotating counter */
        char cryptKey[CIFS_CRYPTO_KEY_SIZE];
        /* 16th byte of RFC1001 workstation name is always null */
@@ -203,9 +203,14 @@ struct cifsSesInfo {
        char * domainName;
        char * password;
 };
-/* session flags */
+/* no more than one of the following three session flags may be set */
 #define CIFS_SES_NT4 1
-
+#define CIFS_SES_OS2 2
+#define CIFS_SES_W9X 4
+/* following flag is set for old servers such as OS2 (and Win95?)
+   which do not negotiate NTLM or POSIX dialects, but instead
+   negotiate one of the older LANMAN dialects */
+#define CIFS_SES_LANMAN 8
 /*
  * there is one of these for each connection to a resource on a particular
  * session 
@@ -512,7 +517,8 @@ require use of the stronger protocol */
  * This list helps improve performance and eliminate the messages indicating
  * that we had a communications error talking to the server in this list. 
  */
-GLOBAL_EXTERN struct servers_not_supported *NotSuppList;       /*@z4a */
+/* Feature not supported */
+/* GLOBAL_EXTERN struct servers_not_supported *NotSuppList; */
 
 /*
  * The following is a hash table of all the users we know about.
@@ -568,7 +574,6 @@ GLOBAL_EXTERN unsigned int lookupCacheEnabled;
 GLOBAL_EXTERN unsigned int extended_security;  /* if on, session setup sent 
                                with more secure ntlmssp2 challenge/resp */
 GLOBAL_EXTERN unsigned int sign_CIFS_PDUs;  /* enable smb packet signing */
-GLOBAL_EXTERN unsigned int secFlags;
 GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
 GLOBAL_EXTERN unsigned int CIFSMaxBufSize;  /* max size not including hdr */
 GLOBAL_EXTERN unsigned int cifs_min_rcv;    /* min size of big ntwrk buf pool */
index 81df2bf8e75a70d222f85757c5afdd1df9f20246..6df9dadba647035ad58f719a5e322bca1b9ac8de 100644 (file)
@@ -26,7 +26,8 @@
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 #define LANMAN_PROT 0
-#define CIFS_PROT   1
+#define LANMAN2_PROT 1
+#define CIFS_PROT   2
 #else
 #define CIFS_PROT   0
 #endif
@@ -408,6 +409,8 @@ typedef struct negotiate_req {
 
 /* Dialect index is 13 for LANMAN */
 
+#define MIN_TZ_ADJ (15 * 60) /* minimum grid for timezones in seconds */
+
 typedef struct lanman_neg_rsp {
        struct smb_hdr hdr;     /* wct = 13 */
        __le16 DialectIndex;
@@ -417,7 +420,10 @@ typedef struct lanman_neg_rsp {
        __le16 MaxNumberVcs;
        __le16 RawMode;
        __le32 SessionKey;
-       __le32 ServerTime;
+       struct {
+               __le16 Time;
+               __le16 Date;
+       } __attribute__((packed)) SrvTime;
        __le16 ServerTimeZone;
        __le16 EncryptionKeyLength;
        __le16 Reserved;
 typedef union smb_com_tree_disconnect {     /* as an alternative can use flag on
 typedef struct smb_com_close_req {
        struct smb_hdr hdr;     /* wct = 3 */
        __u16 FileID;
-       __u32 LastWriteTime;    /* should be zero */
+       __u32 LastWriteTime;    /* should be zero or -1 */
        __u16 ByteCount;        /* 0 */
 } __attribute__((packed)) CLOSE_REQ;
 
index b35c55c3c8bb9d76b1afc8ae9e13189d2f3f6071..f1f8225102f0a6dead4670ef95c6fa42fe0917e3 100644 (file)
@@ -50,12 +50,12 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
 extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
                        struct kvec *, int /* nvec to send */, 
                        int * /* type of buf returned */ , const int long_op);
-extern int SendReceiveBlockingLock(const unsigned int /* xid */ , struct cifsTconInfo *,
+extern int SendReceiveBlockingLock(const unsigned int /* xid */ , 
+                                       struct cifsTconInfo *,
                                struct smb_hdr * /* input */ ,
                                struct smb_hdr * /* out */ ,
                                int * /* bytes returned */);
-extern int checkSMBhdr(struct smb_hdr *smb, __u16 mid);
-extern int checkSMB(struct smb_hdr *smb, __u16 mid, int length);
+extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
 extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *);
 extern int is_size_safe_to_change(struct cifsInodeInfo *);
 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *);
@@ -80,6 +80,9 @@ extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
 extern void DeleteOplockQEntry(struct oplock_q_entry *);
 extern struct timespec cifs_NTtimeToUnix(u64 /* utc nanoseconds since 1601 */ );
 extern u64 cifs_UnixTimeToNT(struct timespec);
+extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time);
+extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time);
+
 extern int cifs_get_inode_info(struct inode **pinode,
                        const unsigned char *search_path, 
                        FILE_ALL_INFO * pfile_info,
@@ -116,6 +119,7 @@ extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
 extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
                        const unsigned char *searchName,
                        FILE_ALL_INFO * findData,
+                       int legacy /* whether to use old info level */,
                        const struct nls_table *nls_codepage, int remap);
 extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
                         const unsigned char *searchName,
@@ -279,8 +283,6 @@ extern void sesInfoFree(struct cifsSesInfo *);
 extern struct cifsTconInfo *tconInfoAlloc(void);
 extern void tconInfoFree(struct cifsTconInfo *);
 
-extern int cifs_reconnect(struct TCP_Server_Info *server);
-
 extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *,__u32 *);
 extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
                          __u32 *);
index 075d8fb3d37608a96d0610c519794388351d1173..098790eb2aa161967538c91b734c3fe45fa5d8b7 100644 (file)
@@ -46,6 +46,7 @@ static struct {
 } protocols[] = {
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
        {LANMAN_PROT, "\2LM1.2X002"},
+       {LANMAN2_PROT, "\2LANMAN2.1"},
 #endif /* weak password hashing for legacy clients */
        {CIFS_PROT, "\2NT LM 0.12"}, 
        {POSIX_PROT, "\2POSIX 2"},
@@ -58,6 +59,7 @@ static struct {
 } protocols[] = {
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
        {LANMAN_PROT, "\2LM1.2X002"},
+       {LANMAN2_PROT, "\2LANMAN2.1"},
 #endif /* weak password hashing for legacy clients */
        {CIFS_PROT, "\2NT LM 0.12"}, 
        {BAD_PROT, "\2"}
@@ -67,13 +69,13 @@ static struct {
 /* define the number of elements in the cifs dialect array */
 #ifdef CONFIG_CIFS_POSIX
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-#define CIFS_NUM_PROT 3
+#define CIFS_NUM_PROT 4
 #else
 #define CIFS_NUM_PROT 2
 #endif /* CIFS_WEAK_PW_HASH */
 #else /* not posix */
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
-#define CIFS_NUM_PROT 2
+#define CIFS_NUM_PROT 3
 #else
 #define CIFS_NUM_PROT 1
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
@@ -397,6 +399,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
        struct TCP_Server_Info * server;
        u16 count;
        unsigned int secFlags;
+       u16 dialect;
 
        if(ses->server)
                server = ses->server;
@@ -436,9 +439,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
        if (rc != 0) 
                goto neg_err_exit;
 
-       cFYI(1,("Dialect: %d", pSMBr->DialectIndex));
+       dialect = le16_to_cpu(pSMBr->DialectIndex);
+       cFYI(1,("Dialect: %d", dialect));
        /* Check wct = 1 error case */
-       if((pSMBr->hdr.WordCount < 13) || (pSMBr->DialectIndex == BAD_PROT)) {
+       if((pSMBr->hdr.WordCount < 13) || (dialect == BAD_PROT)) {
                /* core returns wct = 1, but we do not ask for core - otherwise
                small wct just comes when dialect index is -1 indicating we 
                could not negotiate a common dialect */
@@ -446,7 +450,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                goto neg_err_exit;
 #ifdef CONFIG_CIFS_WEAK_PW_HASH 
        } else if((pSMBr->hdr.WordCount == 13)
-                       && (pSMBr->DialectIndex == LANMAN_PROT)) {
+                       && ((dialect == LANMAN_PROT)
+                               || (dialect == LANMAN2_PROT))) {
+               __s16 tmp;
                struct lanman_neg_rsp * rsp = (struct lanman_neg_rsp *)pSMBr;
 
                if((secFlags & CIFSSEC_MAY_LANMAN) || 
@@ -472,12 +478,44 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        server->maxRw = 0;/* we do not need to use raw anyway */
                        server->capabilities = CAP_MPX_MODE;
                }
-               server->timeZone = le16_to_cpu(rsp->ServerTimeZone);
+               tmp = (__s16)le16_to_cpu(rsp->ServerTimeZone);
+               if (tmp == -1) {
+                       /* OS/2 often does not set timezone therefore
+                        * we must use server time to calc time zone.
+                        * Could deviate slightly from the right zone.
+                        * Smallest defined timezone difference is 15 minutes
+                        * (i.e. Nepal).  Rounding up/down is done to match
+                        * this requirement.
+                        */
+                       int val, seconds, remain, result;
+                       struct timespec ts, utc;
+                       utc = CURRENT_TIME;
+                       ts = cnvrtDosUnixTm(le16_to_cpu(rsp->SrvTime.Date),
+                                               le16_to_cpu(rsp->SrvTime.Time));
+                       cFYI(1,("SrvTime: %d sec since 1970 (utc: %d) diff: %d",
+                               (int)ts.tv_sec, (int)utc.tv_sec, 
+                               (int)(utc.tv_sec - ts.tv_sec)));
+                       val = (int)(utc.tv_sec - ts.tv_sec);
+                       seconds = val < 0 ? -val : val;
+                       result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;
+                       remain = seconds % MIN_TZ_ADJ;
+                       if(remain >= (MIN_TZ_ADJ / 2))
+                               result += MIN_TZ_ADJ;
+                       if(val < 0)
+                               result = - result;
+                       server->timeAdj = result;
+               } else {
+                       server->timeAdj = (int)tmp;
+                       server->timeAdj *= 60; /* also in seconds */
+               }
+               cFYI(1,("server->timeAdj: %d seconds", server->timeAdj));
+
 
                /* BB get server time for time conversions and add
                code to use it and timezone since this is not UTC */    
 
-               if (rsp->EncryptionKeyLength == cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
+               if (rsp->EncryptionKeyLength == 
+                               cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
                        memcpy(server->cryptKey, rsp->EncryptionKey,
                                CIFS_CRYPTO_KEY_SIZE);
                } else if (server->secMode & SECMODE_PW_ENCRYPT) {
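
The negotiate path above rounds the observed client/server clock skew to the nearest 15-minute step, the smallest real timezone granularity. A standalone sketch of that rounding arithmetic, with MIN_TZ_ADJ spelled out and hypothetical input values, for illustration only:

    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_TZ_ADJ (15 * 60)    /* smallest timezone step, in seconds */

    /* Round a raw utc-minus-server delta to the nearest 15 minutes,
     * preserving sign, as the LANMAN negotiate code does above. */
    static int round_tz_adj(int val)
    {
        int seconds = abs(val);
        int result = (seconds / MIN_TZ_ADJ) * MIN_TZ_ADJ;

        if (seconds % MIN_TZ_ADJ >= MIN_TZ_ADJ / 2)
            result += MIN_TZ_ADJ;
        return val < 0 ? -result : result;
    }

    int main(void)
    {
        /* 5h44m10s behind rounds to -5h45m (a Nepal-like offset) */
        printf("%d\n", round_tz_adj(-(5 * 3600 + 44 * 60 + 10)));
        return 0;
    }
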
@@ -531,7 +569,8 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
        cFYI(0, ("Max buf = %d", ses->server->maxBuf));
        GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
        server->capabilities = le32_to_cpu(pSMBr->Capabilities);
-       server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);  
+       server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
+       server->timeAdj *= 60;
        if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
                memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
                       CIFS_CRYPTO_KEY_SIZE);
@@ -1617,7 +1656,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
        pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */
 
        pSMB->FileID = (__u16) smb_file_id;
-       pSMB->LastWriteTime = 0;
+       pSMB->LastWriteTime = 0xFFFFFFFF;
        pSMB->ByteCount = 0;
        rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
                         (struct smb_hdr *) pSMBr, &bytes_returned, 0);
@@ -2773,9 +2812,11 @@ GetExtAttrOut:
 
 
 /* security id for everyone */
-const struct cifs_sid sid_everyone = {1, 1, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0}};
+static const struct cifs_sid sid_everyone =
+               {1, 1, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0}};
 /* group users */
-const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {32, 545, 0, 0}};
+static const struct cifs_sid sid_user =
+               {1, 2, {0, 0, 0, 0, 0, 5}, {32, 545, 0, 0}};
 
 /* Convert CIFS ACL to POSIX form */
 static int parse_sec_desc(struct cifs_sid * psec_desc, int acl_len)
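
For reference, the initializers above follow the Windows SID layout: a revision byte, a subauthority count, a 6-byte identifier authority, then the subauthority array; sid_user is S-1-5-32-545, the builtin Users group. A small sketch that prints a SID in its familiar text form, assuming that field layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed layout, mirroring the cifs_sid initializers above. */
    struct sid_sketch {
        uint8_t  revision;
        uint8_t  num_subauth;
        uint8_t  authority[6];   /* big-endian identifier authority */
        uint32_t sub_auth[4];
    };

    /* Print a SID in the S-R-A-S1-S2... text form (small authorities
     * only: just the last authority byte is shown). */
    static void print_sid(const struct sid_sketch *sid)
    {
        int i;

        printf("S-%u-%u", (unsigned)sid->revision,
               (unsigned)sid->authority[5]);
        for (i = 0; i < sid->num_subauth; i++)
            printf("-%u", (unsigned)sid->sub_auth[i]);
        printf("\n");
    }

    int main(void)
    {
        /* mirrors sid_user: S-1-5-32-545, the builtin Users group */
        struct sid_sketch users = {1, 2, {0, 0, 0, 0, 0, 5}, {32, 545, 0, 0}};

        print_sid(&users);
        return 0;
    }
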
@@ -2856,7 +2897,6 @@ qsec_out:
        return rc;
 }
 
-
 /* Legacy Query Path Information call for lookup to old servers such
    as Win9x/WinME */
 int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
@@ -2898,7 +2938,16 @@ QInfRetry:
        if (rc) {
                cFYI(1, ("Send error in QueryInfo = %d", rc));
        } else if (pFinfo) {            /* decode response */
+               struct timespec ts;
+               __u32 time = le32_to_cpu(pSMBr->last_write_time);
+               /* BB FIXME - add time zone adjustment BB */
                memset(pFinfo, 0, sizeof(FILE_ALL_INFO));
+               ts.tv_nsec = 0;
+               ts.tv_sec = time;
+               /* decode time fields */
+               pFinfo->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
+               pFinfo->LastWriteTime = pFinfo->ChangeTime;
+               pFinfo->LastAccessTime = 0;
                pFinfo->AllocationSize =
                        cpu_to_le64(le32_to_cpu(pSMBr->size));
                pFinfo->EndOfFile = pFinfo->AllocationSize;
@@ -2922,6 +2971,7 @@ int
 CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
                 const unsigned char *searchName,
                 FILE_ALL_INFO * pFindData,
+                int legacy /* old style infolevel */,
                 const struct nls_table *nls_codepage, int remap)
 {
 /* level 263 SMB_QUERY_FILE_ALL_INFO */
@@ -2970,7 +3020,10 @@ QPathInfoRetry:
        byte_count = params + 1 /* pad */ ;
        pSMB->TotalParameterCount = cpu_to_le16(params);
        pSMB->ParameterCount = pSMB->TotalParameterCount;
-       pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
+       if(legacy)
+               pSMB->InformationLevel = cpu_to_le16(SMB_INFO_STANDARD);
+       else
+               pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO);
        pSMB->Reserved4 = 0;
        pSMB->hdr.smb_buf_length += byte_count;
        pSMB->ByteCount = cpu_to_le16(byte_count);
@@ -2982,13 +3035,24 @@ QPathInfoRetry:
        } else {                /* decode response */
                rc = validate_t2((struct smb_t2_rsp *)pSMBr);
 
-               if (rc || (pSMBr->ByteCount < 40)) 
+               if (rc) /* BB add auto retry on EOPNOTSUPP? */
+                       rc = -EIO;
+               else if (!legacy && (pSMBr->ByteCount < 40)) 
                        rc = -EIO;      /* bad smb */
+               else if(legacy && (pSMBr->ByteCount < 24))
+                       rc = -EIO;  /* 24 or 26 expected but we do not read last field */
                else if (pFindData){
+                       int size;
                        __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
+                       if(legacy) /* we do not read the last field, EAsize, fortunately,
+                                          since it varies by subdialect and on Set vs. Get is
+                                          two or four bytes, but we do not care about it here */
+                               size = sizeof(FILE_INFO_STANDARD);
+                       else
+                               size = sizeof(FILE_ALL_INFO);
                        memcpy((char *) pFindData,
                               (char *) &pSMBr->hdr.Protocol +
-                              data_offset, sizeof (FILE_ALL_INFO));
+                              data_offset, size);
                } else
                    rc = -ENOMEM;
        }
@@ -3613,6 +3677,14 @@ getDFSRetry:
                strncpy(pSMB->RequestFileName, searchName, name_len);
        }
 
+       if(ses->server) {
+               if(ses->server->secMode &
+                  (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+                       pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+       }
+
+       pSMB->hdr.Uid = ses->Suid;
+
        params = 2 /* level */  + name_len /*includes null */ ;
        pSMB->TotalDataCount = 0;
        pSMB->DataCount = 0;
index c78762051da4e5b15da45fa06b46b66d85e55d82..4093d53329306bfc74c0487e06c09d434d7043a6 100644 (file)
@@ -109,7 +109,7 @@ static int ipv6_connect(struct sockaddr_in6 *psin_server,
         * wake up waiters on reconnection? - (not needed currently)
         */
 
-int
+static int
 cifs_reconnect(struct TCP_Server_Info *server)
 {
        int rc = 0;
@@ -771,13 +771,18 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
        separator[0] = ',';
        separator[1] = 0; 
 
-       memset(vol->source_rfc1001_name,0x20,15);
-       for(i=0;i < strnlen(utsname()->nodename,15);i++) {
-               /* does not have to be a perfect mapping since the field is
-               informational, only used for servers that do not support
-               port 445 and it can be overridden at mount time */
-               vol->source_rfc1001_name[i] = 
-                       toupper(utsname()->nodename[i]);
+       if (Local_System_Name[0] != 0)
+               memcpy(vol->source_rfc1001_name, Local_System_Name,15);
+       else {
+               char *nodename = utsname()->nodename;
+               int n = strnlen(nodename,15);
+               memset(vol->source_rfc1001_name,0x20,15);
+               for(i=0 ; i < n ; i++) {
+                       /* does not have to be a perfect mapping since the
+                       field is informational, only used for servers that do
+                       not support port 445, and it can be overridden at mount time */
+                       vol->source_rfc1001_name[i] = toupper(nodename[i]);
+               }
        }
        vol->source_rfc1001_name[15] = 0;
        /* null target name indicates to use *SMBSERVR default called name
@@ -3215,7 +3220,9 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
                        }
                        /* else do not bother copying these informational fields */
                }
-               if(smb_buffer_response->WordCount == 3)
+               if((smb_buffer_response->WordCount == 3) ||
+                        (smb_buffer_response->WordCount == 7))
+                       /* field is in same location */
                        tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
                else
                        tcon->Flags = 0;
@@ -3312,19 +3319,21 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
                first_time = 1;
        }
        if (!rc) {
+               pSesInfo->flags = 0;
                pSesInfo->capabilities = pSesInfo->server->capabilities;
                if(linuxExtEnabled == 0)
                        pSesInfo->capabilities &= (~CAP_UNIX);
        /*      pSesInfo->sequence_number = 0;*/
-               cFYI(1,("Security Mode: 0x%x Capabilities: 0x%x Time Zone: %d",
+               cFYI(1,("Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
                        pSesInfo->server->secMode,
                        pSesInfo->server->capabilities,
-                       pSesInfo->server->timeZone));
+                       pSesInfo->server->timeAdj));
                if(experimEnabled < 2)
                        rc = CIFS_SessSetup(xid, pSesInfo,
                                            first_time, nls_info);
                else if (extended_security
-                               && (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
+                               && (pSesInfo->capabilities 
+                                       & CAP_EXTENDED_SECURITY)
                                && (pSesInfo->server->secType == NTLMSSP)) {
                        rc = -EOPNOTSUPP;
                } else if (extended_security
@@ -3338,7 +3347,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
                        if (!rc) {
                                if(ntlmv2_flag) {
                                        char * v2_response;
-                                       cFYI(1,("Can use more secure NTLM version 2 password hash"));
+                                       cFYI(1,("more secure NTLM ver2 hash"));
                                        if(CalcNTLMv2_partial_mac_key(pSesInfo, 
                                                nls_info)) {
                                                rc = -ENOMEM;
index 6b90ef98e4cfe9cdfcd224bdb0a2df202b66612f..35d54bb0869ab67510449f4a412aa0018c9fcfc8 100644 (file)
@@ -337,6 +337,7 @@ int cifs_get_inode_info(struct inode **pinode,
                pfindData = (FILE_ALL_INFO *)buf;
                /* could do find first instead but this returns more info */
                rc = CIFSSMBQPathInfo(xid, pTcon, search_path, pfindData,
+                             0 /* not legacy */,
                              cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
                /* BB optimize code so we do not make the above call
@@ -384,8 +385,10 @@ int cifs_get_inode_info(struct inode **pinode,
                /* get new inode */
                if (*pinode == NULL) {
                        *pinode = new_inode(sb);
-                       if (*pinode == NULL)
+                       if (*pinode == NULL) {
+                               kfree(buf);
                                return -ENOMEM;
+                       }
                        /* Is an i_ino of zero legal? Can we use that to check
                           if the server supports returning inode numbers?  Are
                           there other sanity checks we can use to ensure that
@@ -431,8 +434,11 @@ int cifs_get_inode_info(struct inode **pinode,
                (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
 
                /* Linux can not store file creation time so ignore it */
-               inode->i_atime =
-                   cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
+               if(pfindData->LastAccessTime)
+                       inode->i_atime = cifs_NTtimeToUnix
+                               (le64_to_cpu(pfindData->LastAccessTime));
+               else /* do not need to use current_fs_time - time not stored */
+                       inode->i_atime = CURRENT_TIME;
                inode->i_mtime =
                    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
                inode->i_ctime =
index a57f5d6e6213d6f23e693c88a98c3d539eb4ae6a..0bee8b7e521a2a153fa50e03bde68ec69865a364 100644 (file)
@@ -254,7 +254,11 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
                                tmpbuffer,
                                len - 1,
                                cifs_sb->local_nls);
-       else {
+       else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+               cERROR(1,("SFU style symlinks not implemented yet"));
+               /* add open and read as in fs/cifs/inode.c */
+       
+       } else {
                rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, GENERIC_READ,
                                OPEN_REPARSE_POINT,&fid, &oplock, NULL, 
                                cifs_sb->local_nls, 
index 7aa23490541f9d68edee8c465e489ec79ab4b95f..ccebf9b7eb86e2293d0cd52ee15137b6dbdd4ad4 100644 (file)
@@ -252,10 +252,11 @@ MD5Transform(__u32 buf[4], __u32 const in[16])
        buf[3] += d;
 }
 
+#if 0   /* currently unused */
 /***********************************************************************
  the rfc 2104 version of hmac_md5 initialisation.
 ***********************************************************************/
-void
+static void
 hmac_md5_init_rfc2104(unsigned char *key, int key_len,
                      struct HMACMD5Context *ctx)
 {
@@ -289,6 +290,7 @@ hmac_md5_init_rfc2104(unsigned char *key, int key_len,
        MD5Init(&ctx->ctx);
        MD5Update(&ctx->ctx, ctx->k_ipad, 64);
 }
+#endif
 
 /***********************************************************************
  the microsoft version of hmac_md5 initialisation.
@@ -350,7 +352,8 @@ hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx)
  single function to calculate an HMAC MD5 digest from data.
  use the microsoft hmacmd5 init method because the key is 16 bytes.
 ************************************************************/
-void
+#if 0 /* currently unused */
+static void
 hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
         unsigned char *digest)
 {
@@ -361,3 +364,4 @@ hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
        }
        hmac_md5_final(digest, &ctx);
 }
+#endif
index 00e1c5394fe1e8977d031f08a3e74f92b9ae10cc..f7d4f4197bac3cc6973e8c1c5de2ddc9c8721493 100644 (file)
@@ -27,12 +27,12 @@ void MD5Final(unsigned char digest[16], struct MD5Context *context);
 
 /* The following definitions come from lib/hmacmd5.c  */
 
-void hmac_md5_init_rfc2104(unsigned char *key, int key_len,
-                       struct HMACMD5Context *ctx);
+/* void hmac_md5_init_rfc2104(unsigned char *key, int key_len,
+                       struct HMACMD5Context *ctx);*/
 void hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
                        struct HMACMD5Context *ctx);
 void hmac_md5_update(const unsigned char *text, int text_len,
                        struct HMACMD5Context *ctx);
 void hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx);
-void hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
-                       unsigned char *digest);
+/* void hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
+                       unsigned char *digest);*/
index 22c937e5884f36baf85e49cb9f3c3fc014de4522..bbc9cd34b6ea42f02d85e9e31c140824b4ab5dd8 100644 (file)
@@ -389,7 +389,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
        return;
 }
 
-int
+static int
 checkSMBhdr(struct smb_hdr *smb, __u16 mid)
 {
        /* Make sure that this really is an SMB, that it is a response, 
@@ -418,26 +418,42 @@ checkSMBhdr(struct smb_hdr *smb, __u16 mid)
 }
 
 int
-checkSMB(struct smb_hdr *smb, __u16 mid, int length)
+checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
 {
        __u32 len = smb->smb_buf_length;
        __u32 clc_len;  /* calculated length */
        cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len));
-       if (((unsigned int)length < 2 + sizeof (struct smb_hdr)) ||
-           (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)) {
-               if ((unsigned int)length < 2 + sizeof (struct smb_hdr)) {
-                       if (((unsigned int)length >= 
-                               sizeof (struct smb_hdr) - 1)
+
+       if (length < 2 + sizeof (struct smb_hdr)) {
+               if ((length >= sizeof (struct smb_hdr) - 1)
                            && (smb->Status.CifsError != 0)) {
-                               smb->WordCount = 0;
-                               /* some error cases do not return wct and bcc */
+                       smb->WordCount = 0;
+                       /* some error cases do not return wct and bcc */
+                       return 0;
+               } else if ((length == sizeof(struct smb_hdr) + 1) && 
+                               (smb->WordCount == 0)) {
+                       char * tmp = (char *)smb;
+                       /* Need to work around a bug in two servers here */
+                       /* First, check if the part of bcc they sent was zero */
+                       if (tmp[sizeof(struct smb_hdr)] == 0) {
+                               /* some servers return only half of bcc
+                                * on simple responses (wct, bcc both zero);
+                                * in particular we have seen this on
+                                * ulogoffX and FindClose. This leaves
+                                * one byte of bcc potentially uninitialized
+                                */
+                               /* zero rest of bcc */
+                               tmp[sizeof(struct smb_hdr)+1] = 0;
                                return 0;
-                       } else {
-                               cERROR(1, ("Length less than smb header size"));
                        }
+                       cERROR(1,("rcvd invalid byte count (bcc)"));
+               } else {
+                       cERROR(1, ("Length less than smb header size"));
                }
-               if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
-                       cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
+               return 1;
+       }
+       if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+               cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
                                   smb->Mid));
                return 1;
        }
@@ -446,7 +462,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
                return 1;
        clc_len = smbCalcSize_LE(smb);
 
-       if(4 + len != (unsigned int)length) {
+       if(4 + len != length) {
                cERROR(1, ("Length read does not match RFC1001 length %d",len));
                return 1;
        }
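
What checkSMB() is validating is the RFC1001 framing: every SMB arrives as a 4-byte length prefix followed by smb_buf_length bytes, so the byte count read from the socket must equal 4 + smb_buf_length. A minimal sketch of that invariant (hypothetical helper, for illustration):

    #include <stdio.h>

    /* Sketch: an SMB message on the wire is a 4-byte RFC1001 length
     * prefix followed by smb_buf_length bytes of SMB data; a read of
     * `length` bytes is well-framed only when the two agree. */
    static int rfc1001_framed_ok(unsigned int length,
                                 unsigned int smb_buf_length)
    {
        return 4 + smb_buf_length == length;
    }

    int main(void)
    {
        printf("%d\n", rfc1001_framed_ok(72, 68)); /* 1: consistent */
        printf("%d\n", rfc1001_framed_ok(72, 70)); /* 0: short read */
        return 0;
    }
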
index ce87550e918f8d33d87573677287e24d2da234e9..992e80edc720bb782cc13d5dcb1843691a240adf 100644 (file)
@@ -909,3 +909,61 @@ cifs_UnixTimeToNT(struct timespec t)
        /* Convert to 100ns intervals and then add the NTFS time offset. */
        return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET;
 }
+
+static int total_days_of_prev_months[] =
+{0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334};
+
+
+__le64 cnvrtDosCifsTm(__u16 date, __u16 time)
+{
+       return cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(date, time)));
+}
+
+struct timespec cnvrtDosUnixTm(__u16 date, __u16 time)
+{
+       struct timespec ts;
+       int sec, min, days, month, year;
+       SMB_TIME * st = (SMB_TIME *)&time;
+       SMB_DATE * sd = (SMB_DATE *)&date;
+
+       cFYI(1,("date %d time %d",date, time));
+
+       sec = 2 * st->TwoSeconds;
+       min = st->Minutes;
+       if((sec > 59) || (min > 59))
+               cERROR(1,("illegal time min %d sec %d", min, sec));
+       sec += (min * 60);
+       sec += 60 * 60 * st->Hours;
+       if(st->Hours > 24)
+               cERROR(1,("illegal hours %d",st->Hours));
+       days = sd->Day;
+       month = sd->Month;
+       if((days > 31) || (month > 12))
+               cERROR(1,("illegal date, month %d day: %d", month, days));
+       month -= 1;
+       days += total_days_of_prev_months[month];
+       days += 3652; /* account for difference in days between 1980 and 1970 */
+       year = sd->Year;
+       days += year * 365;
+       days += (year/4); /* leap year */
+       /* generalized leap year calculation is more complex, ie no leap year
+       for years/100 except for years/400, but since the maximum number for DOS
+        year is 2**7, the last year is 1980+127, which means we need only
+        consider 2 special case years, ie the years 2000 and 2100, and only
+        adjust for the lack of leap year for the year 2100, as 2000 was a 
+        leap year (divisible by 400) */
+       if(year >= 120)  /* the year 2100 */
+               days = days - 1;  /* do not count leap year for the year 2100 */
+
+       /* adjust for leap year where we are still before leap day */
+       if(year != 120)
+               days -= ((year & 0x03) == 0) && (month < 2 ? 1 : 0);
+       sec += 24 * 60 * 60 * days; 
+
+       ts.tv_sec = sec;
+
+       /* cFYI(1,("sec after cnvrt dos to unix time %d",sec)); */
+
+       ts.tv_nsec = 0;
+       return ts;
+} 
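
cnvrtDosUnixTm() above decodes the DOS fields through the SMB_TIME/SMB_DATE bitfield structs (defined elsewhere in the cifs headers). The packing is the classic FAT layout; a portable sketch using explicit shifts instead of bitfields, assuming that layout:

    #include <stdio.h>
    #include <stdint.h>

    /* Classic DOS/FAT packing (assumed layout of SMB_DATE/SMB_TIME):
     *   date: bits 15-9 year since 1980, 8-5 month (1-12), 4-0 day (1-31)
     *   time: bits 15-11 hours, 10-5 minutes, 4-0 seconds/2 */
    static void decode_dos_datetime(uint16_t date, uint16_t time)
    {
        int year  = 1980 + (date >> 9);
        int month = (date >> 5) & 0x0f;
        int day   = date & 0x1f;
        int hour  = time >> 11;
        int min   = (time >> 5) & 0x3f;
        int sec   = 2 * (time & 0x1f);

        printf("%04d-%02d-%02d %02d:%02d:%02d\n",
               year, month, day, hour, min, sec);
    }

    int main(void)
    {
        /* prints 2006-10-17 08:56:42 */
        decode_dos_datetime((26 << 9) | (10 << 5) | 17,
                            (8 << 11) | (56 << 5) | 21);
        return 0;
    }
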
index b27b34537bf23c2bf3bbc566c1155c3d331df7a1..b5b0a2a41befe85734ffc30dc47f68f76bf05790 100644 (file)
@@ -106,6 +106,17 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
        return rc;
 }
 
+static void AdjustForTZ(struct cifsTconInfo * tcon, struct inode * inode)
+{
+       if((tcon) && (tcon->ses) && (tcon->ses->server)) {
+               inode->i_ctime.tv_sec += tcon->ses->server->timeAdj;
+               inode->i_mtime.tv_sec += tcon->ses->server->timeAdj;
+               inode->i_atime.tv_sec += tcon->ses->server->timeAdj;
+       }
+       return;
+}
+
+
 static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
                char * buf, int *pobject_type, int isNewInode)
 {
@@ -135,16 +146,23 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
                tmp_inode->i_ctime =
                      cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
        } else { /* legacy, OS2 and DOS style */
+/*             struct timespec ts;*/
                FIND_FILE_STANDARD_INFO * pfindData = 
                        (FIND_FILE_STANDARD_INFO *)buf;
 
+               tmp_inode->i_mtime = cnvrtDosUnixTm(
+                               le16_to_cpu(pfindData->LastWriteDate),
+                               le16_to_cpu(pfindData->LastWriteTime));
+               tmp_inode->i_atime = cnvrtDosUnixTm(
+                               le16_to_cpu(pfindData->LastAccessDate),
+                               le16_to_cpu(pfindData->LastAccessTime));
+               tmp_inode->i_ctime = cnvrtDosUnixTm(
+                               le16_to_cpu(pfindData->LastWriteDate),
+                               le16_to_cpu(pfindData->LastWriteTime));
+               AdjustForTZ(cifs_sb->tcon, tmp_inode);
                attr = le16_to_cpu(pfindData->Attributes);
                allocation_size = le32_to_cpu(pfindData->AllocationSize);
                end_of_file = le32_to_cpu(pfindData->DataSize);
-               tmp_inode->i_atime = CURRENT_TIME;
-               /* tmp_inode->i_mtime =  BB FIXME - add dos time handling
-               tmp_inode->i_ctime = 0;   BB FIXME */
-
        }
 
        /* Linux can not store file creation time unfortunately so ignore it */
@@ -938,6 +956,7 @@ static int cifs_save_resume_key(const char *current_entry,
                filename = &pFindData->FileName[0];
                /* one byte length, no name conversion */
                len = (unsigned int)pFindData->FileNameLength;
+               cifsFile->srch_inf.resume_key = pFindData->ResumeKey;
        } else {
                cFYI(1,("Unknown findfirst level %d",level));
                return -EINVAL;
index 22b4c35dcfe3e4bfbc067b56880f5fb958df50ae..a8a083543ba050fa65cceee9a302c9b733f68311 100644 (file)
@@ -268,6 +268,10 @@ static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo
        ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
        if(ses->serverOS)
                strncpy(ses->serverOS, bcc_ptr, len);
+       if(ses->serverOS && strncmp(ses->serverOS, "OS/2", 4) == 0) {
+               cFYI(1,("OS/2 server"));
+               ses->flags |= CIFS_SES_OS2;
+       }
 
        bcc_ptr += len + 1;
        bleft -= len + 1;
@@ -290,16 +294,11 @@ static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo
         if(len > bleft)
                 return rc;
 
-        if(ses->serverDomain)
-                kfree(ses->serverDomain);
-
-        ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
-        if(ses->serverOS)
-                strncpy(ses->serverOS, bcc_ptr, len);
-
-        bcc_ptr += len + 1;
-       bleft -= len + 1;
-
+       /* No domain field in the LANMAN case. The domain is
+          returned by old servers in the SMB negprot response */
+       /* BB For newer servers which do not support Unicode,
+          and thus do return the domain here, we could add
+          parsing for it later, but it is not very important */
        cFYI(1,("ascii: bytes left %d",bleft));
 
        return rc;
@@ -366,6 +365,8 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
        str_area = kmalloc(2000, GFP_KERNEL);
        bcc_ptr = str_area;
 
+       ses->flags &= ~CIFS_SES_LANMAN;
+
        if(type == LANMAN) {
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
                char lnm_session_key[CIFS_SESS_KEY_SIZE];
@@ -377,7 +378,7 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
                /* and copy into bcc */
 
                calc_lanman_hash(ses, lnm_session_key);
-
+               ses->flags |= CIFS_SES_LANMAN; 
 /* #ifdef CONFIG_CIFS_DEBUG2
                cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
                        CIFS_SESS_KEY_SIZE);
index efaa044523a7094ecbbddd1ead52f89fc424cc7e..7a1b2b961ec875b7f1a964d8f20f27b8fec58239 100644 (file)
@@ -364,20 +364,20 @@ E_P24(unsigned char *p21, unsigned char *c8, unsigned char *p24)
        smbhash(p24 + 16, c8, p21 + 14, 1);
 }
 
-void
+#if 0 /* currently unused */
+static void
 D_P16(unsigned char *p14, unsigned char *in, unsigned char *out)
 {
        smbhash(out, in, p14, 0);
        smbhash(out + 8, in + 8, p14 + 7, 0);
 }
 
-void
+static void
 E_old_pw_hash(unsigned char *p14, unsigned char *in, unsigned char *out)
 {
        smbhash(out, in, p14, 1);
        smbhash(out + 8, in + 8, p14 + 7, 1);
 }
-#if 0
 /* these routines are currently unneeded, but may be
        needed later */
 void
index f518c5e45035c50955b51c55d08ab86b66127842..4b25ba92180d649497b74f77a594b7ba7327ed5f 100644 (file)
 
 void SMBencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
 void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-void nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16]);
 static void SMBOWFencrypt(unsigned char passwd[16], unsigned char *c8,
                   unsigned char p24[24]);
-void NTLMSSPOWFencrypt(unsigned char passwd[8],
-                      unsigned char *ntlmchalresp, unsigned char p24[24]);
 void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
 
 /*
@@ -144,8 +141,9 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16)
        memset(wpwd,0,129 * 2);
 }
 
+#if 0 /* currently unused */
 /* Does both the NT and LM owfs of a user's password */
-void
+static void
 nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16])
 {
        char passwd[514];
@@ -171,6 +169,7 @@ nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16])
        /* clear out local copy of user's password (just being paranoid). */
        memset(passwd, '\0', sizeof (passwd));
 }
+#endif
 
 /* Does the NTLMv2 owfs of a user's password */
 #if 0  /* function not needed yet - but will be soon */
@@ -223,7 +222,8 @@ SMBOWFencrypt(unsigned char passwd[16], unsigned char *c8,
 }
 
 /* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
-void
+#if 0 /* currently unused */
+static void
 NTLMSSPOWFencrypt(unsigned char passwd[8],
                  unsigned char *ntlmchalresp, unsigned char p24[24])
 {
@@ -235,6 +235,7 @@ NTLMSSPOWFencrypt(unsigned char passwd[8],
 
        E_P24(p21, ntlmchalresp, p24);
 }
+#endif
 
 /* Does the NT MD4 hash then des encryption. */
 
index 27ca1aa305625fbc19b0122b7875d75f06125c53..a91f2628c981328e41a1b93a5b9c22cc7b0f913a 100644 (file)
@@ -2438,13 +2438,17 @@ HANDLE_IOCTL(0x1260, broken_blkgetsize)
 HANDLE_IOCTL(BLKFRAGET, w_long)
 HANDLE_IOCTL(BLKSECTGET, w_long)
 HANDLE_IOCTL(BLKPG, blkpg_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
 HANDLE_IOCTL(HDIO_GET_UNMASKINTR, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
-HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
 HANDLE_IOCTL(HDIO_GET_MULTCOUNT, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_KEEPSETTINGS, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_32BIT, hdio_ioctl_trans)
 HANDLE_IOCTL(HDIO_GET_NOWERR, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_DMA, hdio_ioctl_trans)
 HANDLE_IOCTL(HDIO_GET_NICE, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_WCACHE, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_ACOUSTIC, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_ADDRESS, hdio_ioctl_trans)
+HANDLE_IOCTL(HDIO_GET_BUSSTATE, hdio_ioctl_trans)
 HANDLE_IOCTL(FDSETPRM32, fd_ioctl_trans)
 HANDLE_IOCTL(FDDEFPRM32, fd_ioctl_trans)
 HANDLE_IOCTL(FDGETPRM32, fd_ioctl_trans)
index 2355bddad8de12609bcc0b517a151883638c79eb..2bac4ba1d1d3755b70598e992847459dc889a962 100644 (file)
@@ -548,6 +548,136 @@ repeat:
        spin_unlock(&dcache_lock);
 }
 
+/*
+ * destroy a single subtree of dentries for unmount
+ * - see the comments on shrink_dcache_for_umount() for a description of the
+ *   locking
+ */
+static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
+{
+       struct dentry *parent;
+
+       BUG_ON(!IS_ROOT(dentry));
+
+       /* detach this root from the system */
+       spin_lock(&dcache_lock);
+       if (!list_empty(&dentry->d_lru)) {
+               dentry_stat.nr_unused--;
+               list_del_init(&dentry->d_lru);
+       }
+       __d_drop(dentry);
+       spin_unlock(&dcache_lock);
+
+       for (;;) {
+               /* descend to the first leaf in the current subtree */
+               while (!list_empty(&dentry->d_subdirs)) {
+                       struct dentry *loop;
+
+                       /* this is a branch with children - detach all of them
+                        * from the system in one go */
+                       spin_lock(&dcache_lock);
+                       list_for_each_entry(loop, &dentry->d_subdirs,
+                                           d_u.d_child) {
+                               if (!list_empty(&loop->d_lru)) {
+                                       dentry_stat.nr_unused--;
+                                       list_del_init(&loop->d_lru);
+                               }
+
+                               __d_drop(loop);
+                               cond_resched_lock(&dcache_lock);
+                       }
+                       spin_unlock(&dcache_lock);
+
+                       /* move to the first child */
+                       dentry = list_entry(dentry->d_subdirs.next,
+                                           struct dentry, d_u.d_child);
+               }
+
+               /* consume the dentries from this leaf up through its parents
+                * until we find one with children or run out altogether */
+               do {
+                       struct inode *inode;
+
+                       if (atomic_read(&dentry->d_count) != 0) {
+                               printk(KERN_ERR
+                                      "BUG: Dentry %p{i=%lx,n=%s}"
+                                      " still in use (%d)"
+                                      " [unmount of %s %s]\n",
+                                      dentry,
+                                      dentry->d_inode ?
+                                      dentry->d_inode->i_ino : 0UL,
+                                      dentry->d_name.name,
+                                      atomic_read(&dentry->d_count),
+                                      dentry->d_sb->s_type->name,
+                                      dentry->d_sb->s_id);
+                               BUG();
+                       }
+
+                       parent = dentry->d_parent;
+                       if (parent == dentry)
+                               parent = NULL;
+                       else
+                               atomic_dec(&parent->d_count);
+
+                       list_del(&dentry->d_u.d_child);
+                       dentry_stat.nr_dentry--;        /* For d_free, below */
+
+                       inode = dentry->d_inode;
+                       if (inode) {
+                               dentry->d_inode = NULL;
+                               list_del_init(&dentry->d_alias);
+                               if (dentry->d_op && dentry->d_op->d_iput)
+                                       dentry->d_op->d_iput(dentry, inode);
+                               else
+                                       iput(inode);
+                       }
+
+                       d_free(dentry);
+
+                       /* finished when we fall off the top of the tree,
+                        * otherwise we ascend to the parent and move to the
+                        * next sibling if there is one */
+                       if (!parent)
+                               return;
+
+                       dentry = parent;
+
+               } while (list_empty(&dentry->d_subdirs));
+
+               dentry = list_entry(dentry->d_subdirs.next,
+                                   struct dentry, d_u.d_child);
+       }
+}
+
+/*
+ * destroy the dentries attached to a superblock on unmounting
+ * - we don't need to use dentry->d_lock, and only need dcache_lock when
+ *   removing the dentry from the system lists and hashes because:
+ *   - the superblock is detached from all mountings and open files, so the
+ *     dentry trees will not be rearranged by the VFS
+ *   - s_umount is write-locked, so the memory pressure shrinker will ignore
+ *     any dentries belonging to this superblock that it comes across
+ *   - the filesystem itself is no longer permitted to rearrange the dentries
+ *     in this superblock
+ */
+void shrink_dcache_for_umount(struct super_block *sb)
+{
+       struct dentry *dentry;
+
+       if (down_read_trylock(&sb->s_umount))
+               BUG();
+
+       dentry = sb->s_root;
+       sb->s_root = NULL;
+       atomic_dec(&dentry->d_count);
+       shrink_dcache_for_umount_subtree(dentry);
+
+       while (!hlist_empty(&sb->s_anon)) {
+               dentry = hlist_entry(sb->s_anon.first, struct dentry, d_hash);
+               shrink_dcache_for_umount_subtree(dentry);
+       }
+}
+
 /*
  * Search for at least 1 mount point in the dentry's subdirs.
  * We descend to the next level whenever the d_subdirs
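
shrink_dcache_for_umount_subtree() above is an iterative depth-first teardown: walk to the first leaf, consume dentries upward until an ancestor still has children, then continue with the next sibling subtree, so no recursion depth is needed. A userspace analogue of the same traversal over a toy tree (illustrative only, not kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy n-ary tree mirroring the dentry parent/first-child/sibling shape. */
    struct node {
        const char *name;
        struct node *parent;
        struct node *child;    /* first child */
        struct node *sibling;  /* next child of our parent */
    };

    static struct node *mknode(const char *name, struct node *parent)
    {
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
            exit(1);
        n->name = name;
        n->parent = parent;
        if (parent) {
            n->sibling = parent->child;
            parent->child = n;
        }
        return n;
    }

    /* Same traversal shape as shrink_dcache_for_umount_subtree():
     * descend to a leaf, free it, then free upward while the parent
     * has no children left, otherwise move to the next child subtree. */
    static void destroy_subtree(struct node *n)
    {
        while (n) {
            struct node *parent, *next;

            while (n->child)
                n = n->child;                 /* descend to the first leaf */

            parent = n->parent;
            if (parent) {
                parent->child = n->sibling;   /* unlink from the parent */
                next = parent->child ? parent->child : parent;
            } else {
                next = NULL;                  /* freeing the root: done */
            }
            printf("freeing %s\n", n->name);
            free(n);
            n = next;
        }
    }

    int main(void)
    {
        struct node *root = mknode("/", NULL);
        struct node *a = mknode("a", root);

        mknode("b", root);
        mknode("a/x", a);
        destroy_subtree(root);  /* leaves print first, the root last */
        return 0;
    }
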
index 490f85b3fa590efd76ae504b51790a58900625d0..81b2c6465eeb26dd21f23746209162eee8169b22 100644 (file)
@@ -1,10 +1,9 @@
 menu "Distributed Lock Manager"
-       depends on INET && EXPERIMENTAL
+       depends on INET && IP_SCTP && EXPERIMENTAL
 
 config DLM
        tristate "Distributed Lock Manager (DLM)"
        depends on IPV6 || IPV6=n
-       depends on IP_SCTP
        select CONFIGFS_FS
        help
        A general purpose distributed lock manager for kernel or userspace
index 7bcea7c5addba24e724926c75f7e9ab46cebeb38..867f93d0417e3fa1ca11e0e7e4b0363aa02d4a48 100644 (file)
@@ -548,7 +548,7 @@ static int receive_from_sock(void)
        }
        len = iov[0].iov_len + iov[1].iov_len;
 
-       r = ret = kernel_recvmsg(sctp_con.sock, &msg, iov, 1, len,
+       r = ret = kernel_recvmsg(sctp_con.sock, &msg, iov, msg.msg_iovlen, len,
                                 MSG_NOSIGNAL | MSG_DONTWAIT);
        if (ret <= 0)
                goto out_close;
index 557d5b614fae6ba694703b86d950471cc2eeedc5..ae228ec54e948a63b25bb39f34577baa22fd06ed 100644 (file)
 /* Maximum msec timeout value storeable in a long int */
 #define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)
 
+#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
+
 
 struct epoll_filefd {
        struct file *file;
@@ -497,7 +499,7 @@ void eventpoll_release_file(struct file *file)
  */
 asmlinkage long sys_epoll_create(int size)
 {
-       int error, fd;
+       int error, fd = -1;
        struct eventpoll *ep;
        struct inode *inode;
        struct file *file;
@@ -640,7 +642,6 @@ eexit_1:
        return error;
 }
 
-#define MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
 
 /*
  * Implement the event wait interface for the eventpoll file. It is the kernel
@@ -657,7 +658,7 @@ asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
                     current, epfd, events, maxevents, timeout));
 
        /* The maximum number of event must be greater than zero */
-       if (maxevents <= 0 || maxevents > MAX_EVENTS)
+       if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
                return -EINVAL;
 
        /* Verify that the area passed by the user is writeable */
@@ -699,6 +700,55 @@ eexit_1:
 }
 
 
+#ifdef TIF_RESTORE_SIGMASK
+
+/*
+ * Implement the event wait interface for the eventpoll file. It is the kernel
+ * part of the user space epoll_pwait(2).
+ */
+asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
+               int maxevents, int timeout, const sigset_t __user *sigmask,
+               size_t sigsetsize)
+{
+       int error;
+       sigset_t ksigmask, sigsaved;
+
+       /*
+        * If the caller wants a certain signal mask to be set during the wait,
+        * we apply it here.
+        */
+       if (sigmask) {
+               if (sigsetsize != sizeof(sigset_t))
+                       return -EINVAL;
+               if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
+                       return -EFAULT;
+               sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
+               sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+       }
+
+       error = sys_epoll_wait(epfd, events, maxevents, timeout);
+
+       /*
+        * If we changed the signal mask, we need to restore the original one.
+        * In case we've got a signal while waiting, we do not restore the
+        * signal mask yet, and we allow do_signal() to deliver the signal on
+        * the way back to userspace, before the signal mask is restored.
+        */
+       if (sigmask) {
+               if (error == -EINTR) {
+                       memcpy(&current->saved_sigmask, &sigsaved,
+                               sizeof(sigsaved));
+                       set_thread_flag(TIF_RESTORE_SIGMASK);
+               } else
+                       sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       }
+
+       return error;
+}
+
+#endif /* #ifdef TIF_RESTORE_SIGMASK */
+
+
 /*
  * Creates the file descriptor to be used by the epoll interface.
  */
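
As with pselect(), the point of the new syscall is that the mask swap and the wait happen atomically; installing the mask with sigprocmask() and then calling epoll_wait() separately leaves a window where a signal can arrive unseen. A minimal userspace sketch, assuming a libc that already exposes an epoll_pwait() wrapper:

    #include <stdio.h>
    #include <signal.h>
    #include <unistd.h>
    #include <sys/epoll.h>

    int main(void)
    {
        struct epoll_event ev, events[8];
        sigset_t mask;
        int epfd, n;

        epfd = epoll_create(8);
        if (epfd < 0) {
            perror("epoll_create");
            return 1;
        }
        ev.events = EPOLLIN;
        ev.data.fd = STDIN_FILENO;
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0) {
            perror("epoll_ctl");
            return 1;
        }

        /* Block everything except SIGINT for the duration of the wait;
         * the kernel restores the old mask before returning (or defers
         * it via TIF_RESTORE_SIGMASK if the wait was interrupted). */
        sigfillset(&mask);
        sigdelset(&mask, SIGINT);

        n = epoll_pwait(epfd, events, 8, 5000 /* ms */, &mask);
        printf("epoll_pwait returned %d\n", n);
        return 0;
    }
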
index 513cd421ac0b6627da069f16a1274154500a8374..d8b9abd95d07e4bf2fa81020856cd4026b766790 100644 (file)
@@ -364,7 +364,6 @@ static int parse_options (char * options,
 {
        char * p;
        substring_t args[MAX_OPT_ARGS];
-       unsigned long kind = EXT2_MOUNT_ERRORS_CONT;
        int option;
 
        if (!options)
@@ -404,13 +403,19 @@ static int parse_options (char * options,
                        /* *sb_block = match_int(&args[0]); */
                        break;
                case Opt_err_panic:
-                       kind = EXT2_MOUNT_ERRORS_PANIC;
+                       clear_opt (sbi->s_mount_opt, ERRORS_CONT);
+                       clear_opt (sbi->s_mount_opt, ERRORS_RO);
+                       set_opt (sbi->s_mount_opt, ERRORS_PANIC);
                        break;
                case Opt_err_ro:
-                       kind = EXT2_MOUNT_ERRORS_RO;
+                       clear_opt (sbi->s_mount_opt, ERRORS_CONT);
+                       clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
+                       set_opt (sbi->s_mount_opt, ERRORS_RO);
                        break;
                case Opt_err_cont:
-                       kind = EXT2_MOUNT_ERRORS_CONT;
+                       clear_opt (sbi->s_mount_opt, ERRORS_RO);
+                       clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
+                       set_opt (sbi->s_mount_opt, ERRORS_CONT);
                        break;
                case Opt_nouid32:
                        set_opt (sbi->s_mount_opt, NO_UID32);
@@ -489,7 +494,6 @@ static int parse_options (char * options,
                        return 0;
                }
        }
-       sbi->s_mount_opt |= kind;
        return 1;
 }
 
@@ -715,6 +719,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
                set_opt(sbi->s_mount_opt, ERRORS_PANIC);
        else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
                set_opt(sbi->s_mount_opt, ERRORS_RO);
+       else
+               set_opt(sbi->s_mount_opt, ERRORS_CONT);
 
        sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
        sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
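
The previous code accumulated a single `kind` and OR-ed it in at the end, so an earlier errors= option was silently discarded; the rewrite treats errors=continue/remount-ro/panic as a mutually exclusive group, clearing the other two flags whenever one is set. A toy sketch of that one-of-N flag pattern, with made-up flag values (the real EXT2_MOUNT_ERRORS_* constants live in the ext2 headers):

    #include <stdio.h>

    /* Hypothetical flag values for illustration only. */
    enum { ERRORS_CONT = 1, ERRORS_RO = 2, ERRORS_PANIC = 4 };

    /* Setting one behaviour clears the other two, the same pattern
     * the reworked parse_options() applies per option. */
    static void set_errors_opt(unsigned long *mount_opt, unsigned long which)
    {
        *mount_opt &= ~(unsigned long)(ERRORS_CONT | ERRORS_RO | ERRORS_PANIC);
        *mount_opt |= which;
    }

    int main(void)
    {
        unsigned long opt = ERRORS_CONT;

        set_errors_opt(&opt, ERRORS_PANIC);
        printf("opt=%#lx\n", opt);  /* 0x4: only ERRORS_PANIC remains */
        return 0;
    }
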
index 8bfd56ef18ca56d9c584b250b77ab3aa1220a337..afc2d4f42d7782800f6d65dfc49c6aebc1925169 100644 (file)
@@ -1470,6 +1470,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
                set_opt(sbi->s_mount_opt, ERRORS_PANIC);
        else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_RO)
                set_opt(sbi->s_mount_opt, ERRORS_RO);
+       else
+               set_opt(sbi->s_mount_opt, ERRORS_CONT);
 
        sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
        sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
new file mode 100644 (file)
index 0000000..a6acb96
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux ext4-filesystem routines.
+#
+
+obj-$(CONFIG_EXT4DEV_FS) += ext4dev.o
+
+ext4dev-y      := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
+          ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o
+
+ext4dev-$(CONFIG_EXT4DEV_FS_XATTR)     += xattr.o xattr_user.o xattr_trusted.o
+ext4dev-$(CONFIG_EXT4DEV_FS_POSIX_ACL) += acl.o
+ext4dev-$(CONFIG_EXT4DEV_FS_SECURITY)  += xattr_security.o
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
new file mode 100644 (file)
index 0000000..9e88254
--- /dev/null
@@ -0,0 +1,551 @@
+/*
+ * linux/fs/ext4/acl.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/capability.h>
+#include <linux/fs.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/ext4_fs.h>
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Convert from filesystem to in-memory representation.
+ */
+static struct posix_acl *
+ext4_acl_from_disk(const void *value, size_t size)
+{
+       const char *end = (char *)value + size;
+       int n, count;
+       struct posix_acl *acl;
+
+       if (!value)
+               return NULL;
+       if (size < sizeof(ext4_acl_header))
+                return ERR_PTR(-EINVAL);
+       if (((ext4_acl_header *)value)->a_version !=
+           cpu_to_le32(EXT4_ACL_VERSION))
+               return ERR_PTR(-EINVAL);
+       value = (char *)value + sizeof(ext4_acl_header);
+       count = ext4_acl_count(size);
+       if (count < 0)
+               return ERR_PTR(-EINVAL);
+       if (count == 0)
+               return NULL;
+       acl = posix_acl_alloc(count, GFP_KERNEL);
+       if (!acl)
+               return ERR_PTR(-ENOMEM);
+       for (n=0; n < count; n++) {
+               ext4_acl_entry *entry =
+                       (ext4_acl_entry *)value;
+               if ((char *)value + sizeof(ext4_acl_entry_short) > end)
+                       goto fail;
+               acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
+               acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
+               switch(acl->a_entries[n].e_tag) {
+                       case ACL_USER_OBJ:
+                       case ACL_GROUP_OBJ:
+                       case ACL_MASK:
+                       case ACL_OTHER:
+                               value = (char *)value +
+                                       sizeof(ext4_acl_entry_short);
+                               acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
+                               break;
+
+                       case ACL_USER:
+                       case ACL_GROUP:
+                               value = (char *)value + sizeof(ext4_acl_entry);
+                               if ((char *)value > end)
+                                       goto fail;
+                               acl->a_entries[n].e_id =
+                                       le32_to_cpu(entry->e_id);
+                               break;
+
+                       default:
+                               goto fail;
+               }
+       }
+       if (value != end)
+               goto fail;
+       return acl;
+
+fail:
+       posix_acl_release(acl);
+       return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Convert from in-memory to filesystem representation.
+ */
+static void *
+ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
+{
+       ext4_acl_header *ext_acl;
+       char *e;
+       size_t n;
+
+       *size = ext4_acl_size(acl->a_count);
+       ext_acl = kmalloc(sizeof(ext4_acl_header) + acl->a_count *
+                       sizeof(ext4_acl_entry), GFP_KERNEL);
+       if (!ext_acl)
+               return ERR_PTR(-ENOMEM);
+       ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
+       e = (char *)ext_acl + sizeof(ext4_acl_header);
+       for (n=0; n < acl->a_count; n++) {
+               ext4_acl_entry *entry = (ext4_acl_entry *)e;
+               entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
+               entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
+               switch(acl->a_entries[n].e_tag) {
+                       case ACL_USER:
+                       case ACL_GROUP:
+                               entry->e_id =
+                                       cpu_to_le32(acl->a_entries[n].e_id);
+                               e += sizeof(ext4_acl_entry);
+                               break;
+
+                       case ACL_USER_OBJ:
+                       case ACL_GROUP_OBJ:
+                       case ACL_MASK:
+                       case ACL_OTHER:
+                               e += sizeof(ext4_acl_entry_short);
+                               break;
+
+                       default:
+                               goto fail;
+               }
+       }
+       return (char *)ext_acl;
+
+fail:
+       kfree(ext_acl);
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct posix_acl *
+ext4_iget_acl(struct inode *inode, struct posix_acl **i_acl)
+{
+       struct posix_acl *acl = EXT4_ACL_NOT_CACHED;
+
+       spin_lock(&inode->i_lock);
+       if (*i_acl != EXT4_ACL_NOT_CACHED)
+               acl = posix_acl_dup(*i_acl);
+       spin_unlock(&inode->i_lock);
+
+       return acl;
+}
+
+static inline void
+ext4_iset_acl(struct inode *inode, struct posix_acl **i_acl,
+               struct posix_acl *acl)
+{
+       spin_lock(&inode->i_lock);
+       if (*i_acl != EXT4_ACL_NOT_CACHED)
+               posix_acl_release(*i_acl);
+       *i_acl = posix_acl_dup(acl);
+       spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Inode operation get_posix_acl().
+ *
+ * inode->i_mutex: don't care
+ */
+static struct posix_acl *
+ext4_get_acl(struct inode *inode, int type)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       int name_index;
+       char *value = NULL;
+       struct posix_acl *acl;
+       int retval;
+
+       if (!test_opt(inode->i_sb, POSIX_ACL))
+               return NULL;
+
+       switch(type) {
+               case ACL_TYPE_ACCESS:
+                       acl = ext4_iget_acl(inode, &ei->i_acl);
+                       if (acl != EXT4_ACL_NOT_CACHED)
+                               return acl;
+                       name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+                       break;
+
+               case ACL_TYPE_DEFAULT:
+                       acl = ext4_iget_acl(inode, &ei->i_default_acl);
+                       if (acl != EXT4_ACL_NOT_CACHED)
+                               return acl;
+                       name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
+                       break;
+
+               default:
+                       return ERR_PTR(-EINVAL);
+       }
+       retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
+       if (retval > 0) {
+               value = kmalloc(retval, GFP_KERNEL);
+               if (!value)
+                       return ERR_PTR(-ENOMEM);
+               retval = ext4_xattr_get(inode, name_index, "", value, retval);
+       }
+       if (retval > 0)
+               acl = ext4_acl_from_disk(value, retval);
+       else if (retval == -ENODATA || retval == -ENOSYS)
+               acl = NULL;
+       else
+               acl = ERR_PTR(retval);
+       kfree(value);
+
+       if (!IS_ERR(acl)) {
+               switch(type) {
+                       case ACL_TYPE_ACCESS:
+                               ext4_iset_acl(inode, &ei->i_acl, acl);
+                               break;
+
+                       case ACL_TYPE_DEFAULT:
+                               ext4_iset_acl(inode, &ei->i_default_acl, acl);
+                               break;
+               }
+       }
+       return acl;
+}
+
+/*
+ * Set the access or default ACL of an inode.
+ *
+ * inode->i_mutex: down unless called from ext4_new_inode
+ */
+static int
+ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+            struct posix_acl *acl)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       int name_index;
+       void *value = NULL;
+       size_t size = 0;
+       int error;
+
+       if (S_ISLNK(inode->i_mode))
+               return -EOPNOTSUPP;
+
+       switch(type) {
+               case ACL_TYPE_ACCESS:
+                       name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+                       if (acl) {
+                               mode_t mode = inode->i_mode;
+                               error = posix_acl_equiv_mode(acl, &mode);
+                               if (error < 0)
+                                       return error;
+                               else {
+                                       inode->i_mode = mode;
+                                       ext4_mark_inode_dirty(handle, inode);
+                                       if (error == 0)
+                                               acl = NULL;
+                               }
+                       }
+                       break;
+
+               case ACL_TYPE_DEFAULT:
+                       name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
+                       if (!S_ISDIR(inode->i_mode))
+                               return acl ? -EACCES : 0;
+                       break;
+
+               default:
+                       return -EINVAL;
+       }
+       if (acl) {
+               value = ext4_acl_to_disk(acl, &size);
+               if (IS_ERR(value))
+                       return (int)PTR_ERR(value);
+       }
+
+       error = ext4_xattr_set_handle(handle, inode, name_index, "",
+                                     value, size, 0);
+
+       kfree(value);
+       if (!error) {
+               switch(type) {
+                       case ACL_TYPE_ACCESS:
+                               ext4_iset_acl(inode, &ei->i_acl, acl);
+                               break;
+
+                       case ACL_TYPE_DEFAULT:
+                               ext4_iset_acl(inode, &ei->i_default_acl, acl);
+                               break;
+               }
+       }
+       return error;
+}
+
+static int
+ext4_check_acl(struct inode *inode, int mask)
+{
+       struct posix_acl *acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
+
+       if (IS_ERR(acl))
+               return PTR_ERR(acl);
+       if (acl) {
+               int error = posix_acl_permission(inode, acl, mask);
+               posix_acl_release(acl);
+               return error;
+       }
+
+       return -EAGAIN;
+}
+
+int
+ext4_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+       return generic_permission(inode, mask, ext4_check_acl);
+}
+
+/*
+ * Initialize the ACLs of a new inode. Called from ext4_new_inode.
+ *
+ * dir->i_mutex: down
+ * inode->i_mutex: up (access to inode is still exclusive)
+ */
+int
+ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+{
+       struct posix_acl *acl = NULL;
+       int error = 0;
+
+       if (!S_ISLNK(inode->i_mode)) {
+               if (test_opt(dir->i_sb, POSIX_ACL)) {
+                       acl = ext4_get_acl(dir, ACL_TYPE_DEFAULT);
+                       if (IS_ERR(acl))
+                               return PTR_ERR(acl);
+               }
+               if (!acl)
+                       inode->i_mode &= ~current->fs->umask;
+       }
+       if (test_opt(inode->i_sb, POSIX_ACL) && acl) {
+               struct posix_acl *clone;
+               mode_t mode;
+
+               if (S_ISDIR(inode->i_mode)) {
+                       error = ext4_set_acl(handle, inode,
+                                            ACL_TYPE_DEFAULT, acl);
+                       if (error)
+                               goto cleanup;
+               }
+               clone = posix_acl_clone(acl, GFP_KERNEL);
+               error = -ENOMEM;
+               if (!clone)
+                       goto cleanup;
+
+               mode = inode->i_mode;
+               error = posix_acl_create_masq(clone, &mode);
+               if (error >= 0) {
+                       inode->i_mode = mode;
+                       if (error > 0) {
+                               /* This is an extended ACL */
+                               error = ext4_set_acl(handle, inode,
+                                                    ACL_TYPE_ACCESS, clone);
+                       }
+               }
+               posix_acl_release(clone);
+       }
+cleanup:
+       posix_acl_release(acl);
+       return error;
+}
+
+/*
+ * Does chmod for an inode that may have an Access Control List. The
+ * inode->i_mode field must be updated to the desired value by the caller
+ * before calling this function.
+ * Returns 0 on success, or a negative error number.
+ *
+ * We change the ACL rather than storing some ACL entries in the file
+ * mode permission bits (which would be more efficient), because that
+ * would break once additional permissions (like  ACL_APPEND, ACL_DELETE
+ * for directories) are added. There are no more bits available in the
+ * file mode.
+ *
+ * inode->i_mutex: down
+ */
+int
+ext4_acl_chmod(struct inode *inode)
+{
+       struct posix_acl *acl, *clone;
+       int error;
+
+       if (S_ISLNK(inode->i_mode))
+               return -EOPNOTSUPP;
+       if (!test_opt(inode->i_sb, POSIX_ACL))
+               return 0;
+       acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
+       if (IS_ERR(acl) || !acl)
+               return PTR_ERR(acl);
+       clone = posix_acl_clone(acl, GFP_KERNEL);
+       posix_acl_release(acl);
+       if (!clone)
+               return -ENOMEM;
+       error = posix_acl_chmod_masq(clone, inode->i_mode);
+       if (!error) {
+               handle_t *handle;
+               int retries = 0;
+
+       retry:
+               handle = ext4_journal_start(inode,
+                               EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+               if (IS_ERR(handle)) {
+                       error = PTR_ERR(handle);
+                       ext4_std_error(inode->i_sb, error);
+                       goto out;
+               }
+               error = ext4_set_acl(handle, inode, ACL_TYPE_ACCESS, clone);
+               ext4_journal_stop(handle);
+               if (error == -ENOSPC &&
+                   ext4_should_retry_alloc(inode->i_sb, &retries))
+                       goto retry;
+       }
+out:
+       posix_acl_release(clone);
+       return error;
+}
+
+/*
+ * Extended attribute handlers
+ */
+static size_t
+ext4_xattr_list_acl_access(struct inode *inode, char *list, size_t list_len,
+                          const char *name, size_t name_len)
+{
+       const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
+
+       if (!test_opt(inode->i_sb, POSIX_ACL))
+               return 0;
+       if (list && size <= list_len)
+               memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
+       return size;
+}
+
+static size_t
+ext4_xattr_list_acl_default(struct inode *inode, char *list, size_t list_len,
+                           const char *name, size_t name_len)
+{
+       const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
+
+       if (!test_opt(inode->i_sb, POSIX_ACL))
+               return 0;
+       if (list && size <= list_len)
+               memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
+       return size;
+}
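+
+/*
+ * Note that the two ->list handlers above are size-query friendly: when
+ * called with a NULL list (or one that is too small) they copy nothing
+ * and just return the space required, i.e. the length of the attribute
+ * name including its trailing NUL.  A caller can therefore probe once
+ * for the length and call again with a suitably sized buffer.
+ */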
+
+static int
+ext4_xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+{
+       struct posix_acl *acl;
+       int error;
+
+       if (!test_opt(inode->i_sb, POSIX_ACL))
+               return -EOPNOTSUPP;
+
+       acl = ext4_get_acl(inode, type);
+       if (IS_ERR(acl))
+               return PTR_ERR(acl);
+       if (acl == NULL)
+               return -ENODATA;
+       error = posix_acl_to_xattr(acl, buffer, size);
+       posix_acl_release(acl);
+
+       return error;
+}
+
+static int
+ext4_xattr_get_acl_access(struct inode *inode, const char *name,
+                         void *buffer, size_t size)
+{
+       if (strcmp(name, "") != 0)
+               return -EINVAL;
+       return ext4_xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
+}
+
+static int
+ext4_xattr_get_acl_default(struct inode *inode, const char *name,
+                          void *buffer, size_t size)
+{
+       if (strcmp(name, "") != 0)
+               return -EINVAL;
+       return ext4_xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
+}
+
+static int
+ext4_xattr_set_acl(struct inode *inode, int type, const void *value,
+                  size_t size)
+{
+       handle_t *handle;
+       struct posix_acl *acl;
+       int error, retries = 0;
+
+       if (!test_opt(inode->i_sb, POSIX_ACL))
+               return -EOPNOTSUPP;
+       if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+               return -EPERM;
+
+       if (value) {
+               acl = posix_acl_from_xattr(value, size);
+               if (IS_ERR(acl))
+                       return PTR_ERR(acl);
+               else if (acl) {
+                       error = posix_acl_valid(acl);
+                       if (error)
+                               goto release_and_out;
+               }
+       } else
+               acl = NULL;
+
+retry:
+       handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       error = ext4_set_acl(handle, inode, type, acl);
+       ext4_journal_stop(handle);
+       if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
+
+release_and_out:
+       posix_acl_release(acl);
+       return error;
+}
+
+static int
+ext4_xattr_set_acl_access(struct inode *inode, const char *name,
+                         const void *value, size_t size, int flags)
+{
+       if (strcmp(name, "") != 0)
+               return -EINVAL;
+       return ext4_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
+}
+
+static int
+ext4_xattr_set_acl_default(struct inode *inode, const char *name,
+                          const void *value, size_t size, int flags)
+{
+       if (strcmp(name, "") != 0)
+               return -EINVAL;
+       return ext4_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
+}
+
+struct xattr_handler ext4_xattr_acl_access_handler = {
+       .prefix = POSIX_ACL_XATTR_ACCESS,
+       .list   = ext4_xattr_list_acl_access,
+       .get    = ext4_xattr_get_acl_access,
+       .set    = ext4_xattr_set_acl_access,
+};
+
+struct xattr_handler ext4_xattr_acl_default_handler = {
+       .prefix = POSIX_ACL_XATTR_DEFAULT,
+       .list   = ext4_xattr_list_acl_default,
+       .get    = ext4_xattr_get_acl_default,
+       .set    = ext4_xattr_set_acl_default,
+};
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
new file mode 100644 (file)
index 0000000..26a5c1a
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+  File: fs/ext4/acl.h
+
+  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+
+#include <linux/posix_acl_xattr.h>
+
+#define EXT4_ACL_VERSION       0x0001
+
+typedef struct {
+       __le16          e_tag;
+       __le16          e_perm;
+       __le32          e_id;
+} ext4_acl_entry;
+
+typedef struct {
+       __le16          e_tag;
+       __le16          e_perm;
+} ext4_acl_entry_short;
+
+typedef struct {
+       __le32          a_version;
+} ext4_acl_header;
+
+static inline size_t ext4_acl_size(int count)
+{
+       if (count <= 4) {
+               return sizeof(ext4_acl_header) +
+                      count * sizeof(ext4_acl_entry_short);
+       } else {
+               return sizeof(ext4_acl_header) +
+                      4 * sizeof(ext4_acl_entry_short) +
+                      (count - 4) * sizeof(ext4_acl_entry);
+       }
+}
+
+static inline int ext4_acl_count(size_t size)
+{
+       ssize_t s;
+       size -= sizeof(ext4_acl_header);
+       s = size - 4 * sizeof(ext4_acl_entry_short);
+       if (s < 0) {
+               if (size % sizeof(ext4_acl_entry_short))
+                       return -1;
+               return size / sizeof(ext4_acl_entry_short);
+       } else {
+               if (s % sizeof(ext4_acl_entry))
+                       return -1;
+               return s / sizeof(ext4_acl_entry) + 4;
+       }
+}
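+
+/*
+ * A worked example of the encoding above: the first four entries
+ * (the tag types that carry no e_id qualifier) are stored in the short
+ * form, so with sizeof(ext4_acl_header) == 4,
+ * sizeof(ext4_acl_entry_short) == 4 and sizeof(ext4_acl_entry) == 8:
+ *
+ *     ext4_acl_size(3)   == 4 + 3 * 4                 == 16 bytes
+ *     ext4_acl_size(6)   == 4 + 4 * 4 + 2 * 8         == 36 bytes
+ *     ext4_acl_count(36) == (36 - 4 - 16) / 8 + 4     == 6 entries
+ */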
+
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+
+/* Value for inode->u.ext4_i.i_acl and inode->u.ext4_i.i_default_acl
+   if the ACL has not been cached */
+#define EXT4_ACL_NOT_CACHED ((void *)-1)
+
+/* acl.c */
+extern int ext4_permission (struct inode *, int, struct nameidata *);
+extern int ext4_acl_chmod (struct inode *);
+extern int ext4_init_acl (handle_t *, struct inode *, struct inode *);
+
+#else  /* CONFIG_EXT4DEV_FS_POSIX_ACL */
+#include <linux/sched.h>
+#define ext4_permission NULL
+
+static inline int
+ext4_acl_chmod(struct inode *inode)
+{
+       return 0;
+}
+
+static inline int
+ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
+{
+       return 0;
+}
+#endif  /* CONFIG_EXT4DEV_FS_POSIX_ACL */
+
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
new file mode 100644 (file)
index 0000000..5d45582
--- /dev/null
@@ -0,0 +1,1833 @@
+/*
+ *  linux/fs/ext4/balloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/time.h>
+#include <linux/capability.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+
+/*
+ * balloc.c contains the blocks allocation and deallocation routines
+ */
+
+/*
+ * Calculate the block group number and offset, given a block number
+ */
+void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
+               unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
+{
+        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       ext4_grpblk_t offset;
+
+        blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
+       offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
+       if (offsetp)
+               *offsetp = offset;
+       if (blockgrpp)
+               *blockgrpp = blocknr;
+
+}
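+
+/*
+ * E.g. with 4KB blocks (EXT4_BLOCKS_PER_GROUP(sb) == 32768) and
+ * s_first_data_block == 0, block 100000 lands in group 3 at offset
+ * 1696, since 100000 == 3 * 32768 + 1696.
+ */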
+
+/*
+ * The free blocks are managed by bitmaps.  A file system contains several
+ * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
+ * block for inodes, N blocks for the inode table and data blocks.
+ *
+ * The file system contains group descriptors which are located after the
+ * super block.  Each descriptor contains the number of the bitmap block and
+ * the free block count of the group.  The descriptors are loaded in memory
+ * when a file system is mounted (see ext4_read_super).
+ */
+
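+/*
+ * A rough sketch of one block group, left to right on disk:
+ *
+ *     | super | group | block  | inode  | inode | data blocks ... |
+ *     | block | descs | bitmap | bitmap | table |                 |
+ *
+ * (the superblock and group descriptors are only present in the groups
+ * that carry a backup copy)
+ */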
+
+#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
+
+/**
+ * ext4_get_group_desc() -- load group descriptor from disk
+ * @sb:                        super block
+ * @block_group:       given block group
+ * @bh:                        pointer to the buffer head to store the block
+ *                     group descriptor
+ */
+struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
+                                            unsigned int block_group,
+                                            struct buffer_head ** bh)
+{
+       unsigned long group_desc;
+       unsigned long offset;
+       struct ext4_group_desc * desc;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (block_group >= sbi->s_groups_count) {
+               ext4_error (sb, "ext4_get_group_desc",
+                           "block_group >= groups_count - "
+                           "block_group = %d, groups_count = %lu",
+                           block_group, sbi->s_groups_count);
+
+               return NULL;
+       }
+       smp_rmb();
+
+       group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+       offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+       if (!sbi->s_group_desc[group_desc]) {
+               ext4_error (sb, "ext4_get_group_desc",
+                           "Group descriptor not loaded - "
+                           "block_group = %d, group_desc = %lu, desc = %lu",
+                            block_group, group_desc, offset);
+               return NULL;
+       }
+
+       desc = (struct ext4_group_desc *)(
+               (__u8 *)sbi->s_group_desc[group_desc]->b_data +
+               offset * EXT4_DESC_SIZE(sb));
+       if (bh)
+               *bh = sbi->s_group_desc[group_desc];
+       return desc;
+}
+
+/**
+ * read_block_bitmap()
+ * @sb:                        super block
+ * @block_group:       given block group
+ *
+ * Read the block bitmap for the given block_group.
+ *
+ * Return buffer_head on success or NULL in case of failure.
+ */
+static struct buffer_head *
+read_block_bitmap(struct super_block *sb, unsigned int block_group)
+{
+       struct ext4_group_desc * desc;
+       struct buffer_head * bh = NULL;
+
+       desc = ext4_get_group_desc (sb, block_group, NULL);
+       if (!desc)
+               goto error_out;
+       bh = sb_bread(sb, ext4_block_bitmap(sb, desc));
+       if (!bh)
+               ext4_error (sb, "read_block_bitmap",
+                           "Cannot read block bitmap - "
+                           "block_group = %d, block_bitmap = %llu",
+                           block_group,
+                           ext4_block_bitmap(sb, desc));
+error_out:
+       return bh;
+}
+/*
+ * The reservation window structure operations
+ * --------------------------------------------
+ * Operations include:
+ * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
+ *
+ * We use a red-black tree to represent per-filesystem reservation
+ * windows.
+ *
+ */
+
+/**
+ * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
+ * @rb_root:           root of per-filesystem reservation rb tree
+ * @verbose:           verbose mode
+ * @fn:                        function which wishes to dump the reservation map
+ *
+ * If verbose is turned on, it will print all the block reservation
+ * windows (start, end).  Otherwise, it will only print out the "bad"
+ * windows, those windows that overlap with their immediate neighbors.
+ */
+#if 1
+static void __rsv_window_dump(struct rb_root *root, int verbose,
+                             const char *fn)
+{
+       struct rb_node *n;
+       struct ext4_reserve_window_node *rsv, *prev;
+       int bad;
+
+restart:
+       n = rb_first(root);
+       bad = 0;
+       prev = NULL;
+
+       printk("Block Allocation Reservation Windows Map (%s):\n", fn);
+       while (n) {
+               rsv = list_entry(n, struct ext4_reserve_window_node, rsv_node);
+               if (verbose)
+                       printk("reservation window 0x%p "
+                              "start:  %llu, end:  %llu\n",
+                              rsv, rsv->rsv_start, rsv->rsv_end);
+               if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
+                       printk("Bad reservation %p (start >= end)\n",
+                              rsv);
+                       bad = 1;
+               }
+               if (prev && prev->rsv_end >= rsv->rsv_start) {
+                       printk("Bad reservation %p (prev->end >= start)\n",
+                              rsv);
+                       bad = 1;
+               }
+               if (bad) {
+                       if (!verbose) {
+                               printk("Restarting reservation walk in verbose mode\n");
+                               verbose = 1;
+                               goto restart;
+                       }
+               }
+               n = rb_next(n);
+               prev = rsv;
+       }
+       printk("Window map complete.\n");
+       if (bad)
+               BUG();
+}
+#define rsv_window_dump(root, verbose) \
+       __rsv_window_dump((root), (verbose), __FUNCTION__)
+#else
+#define rsv_window_dump(root, verbose) do {} while (0)
+#endif
+
+/**
+ * goal_in_my_reservation()
+ * @rsv:               inode's reservation window
+ * @grp_goal:          given goal block relative to the allocation block group
+ * @group:             the current allocation block group
+ * @sb:                        filesystem super block
+ *
+ * Test if the given goal block (group relative) is within the file's
+ * own block reservation window range.
+ *
+ * If the reservation window is outside the goal allocation group, return 0.
+ * grp_goal (the given goal block) may be -1, meaning no specific
+ * goal block; in that case, always return 1.
+ * If the goal block is within the reservation window, return 1;
+ * otherwise, return 0.
+ */
+static int
+goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
+                       unsigned int group, struct super_block * sb)
+{
+       ext4_fsblk_t group_first_block, group_last_block;
+
+       group_first_block = ext4_group_first_block_no(sb, group);
+       group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
+
+       if ((rsv->_rsv_start > group_last_block) ||
+           (rsv->_rsv_end < group_first_block))
+               return 0;
+       if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
+               || (grp_goal + group_first_block > rsv->_rsv_end)))
+               return 0;
+       return 1;
+}
+
+/**
+ * search_reserve_window()
+ * @rb_root:           root of reservation tree
+ * @goal:              target allocation block
+ *
+ * Find the reserved window which includes the goal, or the previous one
+ * if the goal is not in any window.
+ * Returns NULL if there are no windows or if all windows start after the goal.
+ */
+static struct ext4_reserve_window_node *
+search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
+{
+       struct rb_node *n = root->rb_node;
+       struct ext4_reserve_window_node *rsv;
+
+       if (!n)
+               return NULL;
+
+       do {
+               rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
+
+               if (goal < rsv->rsv_start)
+                       n = n->rb_left;
+               else if (goal > rsv->rsv_end)
+                       n = n->rb_right;
+               else
+                       return rsv;
+       } while (n);
+       /*
+        * We've fallen off the end of the tree: the goal wasn't inside
+        * any particular node.  OK, the previous node must be to one
+        * side of the interval containing the goal.  If it's the RHS,
+        * we need to back up one.
+        */
+       if (rsv->rsv_start > goal) {
+               n = rb_prev(&rsv->rsv_node);
+               rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
+       }
+       return rsv;
+}
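+
+/*
+ * For example, with reserved windows covering blocks [10,19] and
+ * [40,49]: a goal of 45 returns the [40,49] node; a goal of 25 falls
+ * between the windows and returns the previous node, [10,19]; a goal
+ * of 5 precedes every window and yields NULL.
+ */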
+
+/**
+ * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
+ * @sb:                        super block
+ * @rsv:               reservation window to add
+ *
+ * Must be called with rsv_lock held.
+ */
+void ext4_rsv_window_add(struct super_block *sb,
+                   struct ext4_reserve_window_node *rsv)
+{
+       struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
+       struct rb_node *node = &rsv->rsv_node;
+       ext4_fsblk_t start = rsv->rsv_start;
+
+       struct rb_node ** p = &root->rb_node;
+       struct rb_node * parent = NULL;
+       struct ext4_reserve_window_node *this;
+
+       while (*p)
+       {
+               parent = *p;
+               this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);
+
+               if (start < this->rsv_start)
+                       p = &(*p)->rb_left;
+               else if (start > this->rsv_end)
+                       p = &(*p)->rb_right;
+               else {
+                       rsv_window_dump(root, 1);
+                       BUG();
+               }
+       }
+
+       rb_link_node(node, parent, p);
+       rb_insert_color(node, root);
+}
+
+/**
+ * rsv_window_remove() -- unlink a window from the reservation rb tree
+ * @sb:                        super block
+ * @rsv:               reservation window to remove
+ *
+ * Mark the block reservation window as not allocated, and unlink it
+ * from the filesystem reservation window rb tree. Must be called with
+ * rsv_lock held.
+ */
+static void rsv_window_remove(struct super_block *sb,
+                             struct ext4_reserve_window_node *rsv)
+{
+       rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+       rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+       rsv->rsv_alloc_hit = 0;
+       rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
+}
+
+/*
+ * rsv_is_empty() -- Check whether the reservation window is unallocated.
+ * @rsv:               given reservation window to check
+ *
+ * Returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
+ */
+static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
+{
+       /* a valid reservation end block cannot be 0 */
+       return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+}
+
+/**
+ * ext4_init_block_alloc_info()
+ * @inode:             file inode structure
+ *
+ * Allocate and initialize the reservation window structure, and
+ * finally link the window to the ext4 inode structure.
+ *
+ * The reservation window structure is only dynamically allocated
+ * and linked to the ext4 inode the first time the open file
+ * needs a new block. So, before every ext4_new_block(s) call, for
+ * regular files, we should check whether the reservation window
+ * structure exists, and call this function if it does not.
+ * Failing to do so will result in block reservation being turned off
+ * for that open file.
+ *
+ * This function is called from ext4_get_blocks_handle(); it is also
+ * called when the reservation window size is set through an ioctl
+ * before the file is opened for write (which needs block allocation).
+ *
+ * The caller must hold truncate_mutex.
+ */
+void ext4_init_block_alloc_info(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
+       struct super_block *sb = inode->i_sb;
+
+       block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
+       if (block_i) {
+               struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;
+
+               rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+               rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+
+               /*
+                * if filesystem is mounted with NORESERVATION, the goal
+                * reservation window size is set to zero to indicate
+                * block reservation is off
+                */
+               if (!test_opt(sb, RESERVATION))
+                       rsv->rsv_goal_size = 0;
+               else
+                       rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
+               rsv->rsv_alloc_hit = 0;
+               block_i->last_alloc_logical_block = 0;
+               block_i->last_alloc_physical_block = 0;
+       }
+       ei->i_block_alloc_info = block_i;
+}
+
+/**
+ * ext4_discard_reservation()
+ * @inode:             inode
+ *
+ * Discard (free) the block reservation window on last file close,
+ * on truncate, or at the last iput().
+ *
+ * It is called in three cases:
+ *     ext4_release_file(): when the last writer closes the file
+ *     ext4_clear_inode(): at the last iput(), when nobody links to this file
+ *     ext4_truncate(): when the block indirect map is about to change.
+ *
+ */
+void ext4_discard_reservation(struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
+       struct ext4_reserve_window_node *rsv;
+       spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;
+
+       if (!block_i)
+               return;
+
+       rsv = &block_i->rsv_window_node;
+       if (!rsv_is_empty(&rsv->rsv_window)) {
+               spin_lock(rsv_lock);
+               if (!rsv_is_empty(&rsv->rsv_window))
+                       rsv_window_remove(inode->i_sb, rsv);
+               spin_unlock(rsv_lock);
+       }
+}
+
+/**
+ * ext4_free_blocks_sb() -- Free given blocks and update quota
+ * @handle:                    handle to this transaction
+ * @sb:                                super block
+ * @block:                     start physical block to free
+ * @count:                     number of blocks to free
+ * @pdquot_freed_blocks:       pointer to quota
+ */
+void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
+                        ext4_fsblk_t block, unsigned long count,
+                        unsigned long *pdquot_freed_blocks)
+{
+       struct buffer_head *bitmap_bh = NULL;
+       struct buffer_head *gd_bh;
+       unsigned long block_group;
+       ext4_grpblk_t bit;
+       unsigned long i;
+       unsigned long overflow;
+       struct ext4_group_desc * desc;
+       struct ext4_super_block * es;
+       struct ext4_sb_info *sbi;
+       int err = 0, ret;
+       ext4_grpblk_t group_freed;
+
+       *pdquot_freed_blocks = 0;
+       sbi = EXT4_SB(sb);
+       es = sbi->s_es;
+       if (block < le32_to_cpu(es->s_first_data_block) ||
+           block + count < block ||
+           block + count > ext4_blocks_count(es)) {
+               ext4_error (sb, "ext4_free_blocks",
+                           "Freeing blocks not in datazone - "
+                           "block = %llu, count = %lu", block, count);
+               goto error_return;
+       }
+
+       ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);
+
+do_more:
+       overflow = 0;
+       ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
+       /*
+        * Check to see if we are freeing blocks across a group
+        * boundary.
+        */
+       if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
+               overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
+               count -= overflow;
+       }
+       brelse(bitmap_bh);
+       bitmap_bh = read_block_bitmap(sb, block_group);
+       if (!bitmap_bh)
+               goto error_return;
+       desc = ext4_get_group_desc (sb, block_group, &gd_bh);
+       if (!desc)
+               goto error_return;
+
+       if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
+           in_range(ext4_inode_bitmap(sb, desc), block, count) ||
+           in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
+           in_range(block + count - 1, ext4_inode_table(sb, desc),
+                    sbi->s_itb_per_group))
+               ext4_error (sb, "ext4_free_blocks",
+                           "Freeing blocks in system zones - "
+                           "Block = %llu, count = %lu",
+                           block, count);
+
+       /*
+        * We are about to start releasing blocks in the bitmap,
+        * so we need undo access.
+        */
+       /* @@@ check errors */
+       BUFFER_TRACE(bitmap_bh, "getting undo access");
+       err = ext4_journal_get_undo_access(handle, bitmap_bh);
+       if (err)
+               goto error_return;
+
+       /*
+        * We are about to modify some metadata.  Call the journal APIs
+        * to unshare ->b_data if a currently-committing transaction is
+        * using it
+        */
+       BUFFER_TRACE(gd_bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, gd_bh);
+       if (err)
+               goto error_return;
+
+       jbd_lock_bh_state(bitmap_bh);
+
+       for (i = 0, group_freed = 0; i < count; i++) {
+               /*
+                * An HJ special.  This is expensive...
+                */
+#ifdef CONFIG_JBD_DEBUG
+               jbd_unlock_bh_state(bitmap_bh);
+               {
+                       struct buffer_head *debug_bh;
+                       debug_bh = sb_find_get_block(sb, block + i);
+                       if (debug_bh) {
+                               BUFFER_TRACE(debug_bh, "Deleted!");
+                               if (!bh2jh(bitmap_bh)->b_committed_data)
+                                       BUFFER_TRACE(debug_bh,
+                                               "No committed data in bitmap");
+                               BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
+                               __brelse(debug_bh);
+                       }
+               }
+               jbd_lock_bh_state(bitmap_bh);
+#endif
+               if (need_resched()) {
+                       jbd_unlock_bh_state(bitmap_bh);
+                       cond_resched();
+                       jbd_lock_bh_state(bitmap_bh);
+               }
+               /* @@@ This prevents newly-allocated data from being
+                * freed and then reallocated within the same
+                * transaction.
+                *
+                * Ideally we would want to allow that to happen, but to
+                * do so requires making jbd2_journal_forget() capable of
+                * revoking the queued write of a data block, which
+                * implies blocking on the journal lock.  *forget()
+                * cannot block due to truncate races.
+                *
+                * Eventually we can fix this by making jbd2_journal_forget()
+                * return a status indicating whether or not it was able
+                * to revoke the buffer.  On successful revoke, it is
+                * safe not to set the allocation bit in the committed
+                * bitmap, because we know that there is no outstanding
+                * activity on the buffer any more and so it is safe to
+                * reallocate it.
+                */
+               BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
+               J_ASSERT_BH(bitmap_bh,
+                               bh2jh(bitmap_bh)->b_committed_data != NULL);
+               ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
+                               bh2jh(bitmap_bh)->b_committed_data);
+
+               /*
+                * We clear the bit in the bitmap after setting the committed
+                * data bit, because this is the reverse order to that which
+                * the allocator uses.
+                */
+               BUFFER_TRACE(bitmap_bh, "clear bit");
+               if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+                                               bit + i, bitmap_bh->b_data)) {
+                       jbd_unlock_bh_state(bitmap_bh);
+                       ext4_error(sb, __FUNCTION__,
+                                  "bit already cleared for block %llu",
+                                  (ext4_fsblk_t)(block + i));
+                       jbd_lock_bh_state(bitmap_bh);
+                       BUFFER_TRACE(bitmap_bh, "bit already cleared");
+               } else {
+                       group_freed++;
+               }
+       }
+       jbd_unlock_bh_state(bitmap_bh);
+
+       spin_lock(sb_bgl_lock(sbi, block_group));
+       desc->bg_free_blocks_count =
+               cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
+                       group_freed);
+       spin_unlock(sb_bgl_lock(sbi, block_group));
+       percpu_counter_mod(&sbi->s_freeblocks_counter, count);
+
+       /* We dirtied the bitmap block */
+       BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+       err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+
+       /* And the group descriptor block */
+       BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
+       ret = ext4_journal_dirty_metadata(handle, gd_bh);
+       if (!err) err = ret;
+       *pdquot_freed_blocks += group_freed;
+
+       if (overflow && !err) {
+               block += count;
+               count = overflow;
+               goto do_more;
+       }
+       sb->s_dirt = 1;
+error_return:
+       brelse(bitmap_bh);
+       ext4_std_error(sb, err);
+       return;
+}
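+
+/*
+ * Note how the do_more loop above splits a free that straddles a group
+ * boundary: e.g. with 32768 blocks per group, freeing 10 blocks whose
+ * group-relative bit is 32763 first frees 5 blocks in this group
+ * (overflow = 32773 - 32768 = 5), then loops to free the remaining 5
+ * at the start of the next group.
+ */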
+
+/**
+ * ext4_free_blocks() -- Free given blocks and update quota
+ * @handle:            handle for this transaction
+ * @inode:             inode
+ * @block:             start physical block to free
+ * @count:             number of blocks to free
+ */
+void ext4_free_blocks(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t block, unsigned long count)
+{
+       struct super_block * sb;
+       unsigned long dquot_freed_blocks;
+
+       sb = inode->i_sb;
+       if (!sb) {
+               printk("ext4_free_blocks: nonexistent device\n");
+               return;
+       }
+       ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
+       if (dquot_freed_blocks)
+               DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+       return;
+}
+
+/**
+ * ext4_test_allocatable()
+ * @nr:                        given allocation block (group relative)
+ * @bh:                        bufferhead containing the bitmap of the given block group
+ *
+ * For ext4 allocations, we must not reuse any blocks which are
+ * allocated in the bitmap buffer's "last committed data" copy.  This
+ * prevents deletes from freeing up the page for reuse until we have
+ * committed the delete transaction.
+ *
+ * If we didn't do this, then deleting something and reallocating it as
+ * data would allow the old block to be overwritten before the
+ * transaction committed (because we force data to disk before commit).
+ * This would lead to corruption if we crashed between overwriting the
+ * data and committing the delete.
+ *
+ * @@@ We may want to make this allocation behaviour conditional on
+ * data-writes at some point, and disable it for metadata allocations or
+ * sync-data inodes.
+ */
+static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
+{
+       int ret;
+       struct journal_head *jh = bh2jh(bh);
+
+       if (ext4_test_bit(nr, bh->b_data))
+               return 0;
+
+       jbd_lock_bh_state(bh);
+       if (!jh->b_committed_data)
+               ret = 1;
+       else
+               ret = !ext4_test_bit(nr, jh->b_committed_data);
+       jbd_unlock_bh_state(bh);
+       return ret;
+}
+
+/**
+ * bitmap_search_next_usable_block()
+ * @start:             the starting block (group relative) of the search
+ * @bh:                        bufferhead containing the block group bitmap
+ * @maxblocks:         the ending block (group relative) of the reservation
+ *
+ * The bitmap search --- search forward alternately through the actual
+ * bitmap on disk and the last-committed copy in journal, until we find a
+ * bit free in both bitmaps.
+ */
+static ext4_grpblk_t
+bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
+                                       ext4_grpblk_t maxblocks)
+{
+       ext4_grpblk_t next;
+       struct journal_head *jh = bh2jh(bh);
+
+       while (start < maxblocks) {
+               next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
+               if (next >= maxblocks)
+                       return -1;
+               if (ext4_test_allocatable(next, bh))
+                       return next;
+               jbd_lock_bh_state(bh);
+               if (jh->b_committed_data)
+                       start = ext4_find_next_zero_bit(jh->b_committed_data,
+                                                       maxblocks, next);
+               jbd_unlock_bh_state(bh);
+       }
+       return -1;
+}
+
+/**
+ * find_next_usable_block()
+ * @start:             the starting block (group relative) to find next
+ *                     allocatable block in bitmap.
+ * @bh:                        bufferhead containing the block group bitmap
+ * @maxblocks:         the ending block (group relative) for the search
+ *
+ * Find an allocatable block in a bitmap.  We honor both the bitmap and
+ * its last-committed copy (if that exists), and perform the "most
+ * appropriate allocation" algorithm of looking for a free block near
+ * the initial goal; then for a free byte somewhere in the bitmap; then
+ * for any free bit in the bitmap.
+ */
+static ext4_grpblk_t
+find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
+                       ext4_grpblk_t maxblocks)
+{
+       ext4_grpblk_t here, next;
+       char *p, *r;
+
+       if (start > 0) {
+               /*
+                * The goal was occupied; search forward for a free
+                * block within the next XX blocks.
+                *
+                * end_goal is more or less random, but it has to be
+                * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
+                * next 64-bit boundary is simple..
+                */
+               ext4_grpblk_t end_goal = (start + 63) & ~63;
+               if (end_goal > maxblocks)
+                       end_goal = maxblocks;
+               here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
+               if (here < end_goal && ext4_test_allocatable(here, bh))
+                       return here;
+               ext4_debug("Bit not found near goal\n");
+       }
+
+       here = start;
+       if (here < 0)
+               here = 0;
+
+       p = ((char *)bh->b_data) + (here >> 3);
+       r = memscan(p, 0, (maxblocks - here + 7) >> 3);
+       next = (r - ((char *)bh->b_data)) << 3;
+
+       if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
+               return next;
+
+       /*
+        * The bitmap search --- search forward alternately through the actual
+        * bitmap and the last-committed copy until we find a bit free in
+        * both
+        */
+       here = bitmap_search_next_usable_block(here, bh, maxblocks);
+       return here;
+}
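+
+/*
+ * For instance, if the goal bit 5 is taken, the aligned search above
+ * scans bits 5..63 first, since end_goal == (5 + 63) & ~63 == 64; only
+ * then does it fall back to the free-byte memscan() and finally to the
+ * bit-by-bit search of both bitmaps.
+ */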
+
+/**
+ * claim_block()
+ * @block:             the free block (group relative) to allocate
+ * @bh:                        the bufferhead containing the block group bitmap
+ *
+ * We think we can allocate this block in this bitmap.  Try to set the bit.
+ * If that succeeds then check that nobody has allocated and then freed the
+ * block since we saw that it was not marked in b_committed_data.  If it _was_
+ * allocated and freed then clear the bit in the bitmap again and return
+ * zero (failure).
+ */
+static inline int
+claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
+{
+       struct journal_head *jh = bh2jh(bh);
+       int ret;
+
+       if (ext4_set_bit_atomic(lock, block, bh->b_data))
+               return 0;
+       jbd_lock_bh_state(bh);
+       if (jh->b_committed_data && ext4_test_bit(block,jh->b_committed_data)) {
+               ext4_clear_bit_atomic(lock, block, bh->b_data);
+               ret = 0;
+       } else {
+               ret = 1;
+       }
+       jbd_unlock_bh_state(bh);
+       return ret;
+}
+
+/**
+ * ext4_try_to_allocate()
+ * @sb:                        superblock
+ * @handle:            handle to this transaction
+ * @group:             given allocation block group
+ * @bitmap_bh:         bufferhead holds the block bitmap
+ * @grp_goal:          given target block within the group
+ * @count:             target number of blocks to allocate
+ * @my_rsv:            reservation window
+ *
+ * Attempt to allocate blocks within a given range. Set the range of the
+ * allocation first, then find the first free bit(s) in the bitmap (within
+ * the range), and finally allocate the blocks by claiming the found free
+ * bit(s) as allocated.
+ *
+ * To set the range of this allocation:
+ *     if there is a reservation window, only try to allocate block(s) from the
+ *     file's own reservation window;
+ *     otherwise, the allocation range starts at the given goal block and ends
+ *     at the block group's last block.
+ *
+ * If we failed to allocate the desired block then we may end up crossing to a
+ * new bitmap.  In that case we must release write access to the old one via
+ * ext4_journal_release_buffer(), else we'll run out of credits.
+ */
+static ext4_grpblk_t
+ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
+                       struct buffer_head *bitmap_bh, ext4_grpblk_t grp_goal,
+                       unsigned long *count, struct ext4_reserve_window *my_rsv)
+{
+       ext4_fsblk_t group_first_block;
+       ext4_grpblk_t start, end;
+       unsigned long num = 0;
+
+       /* we do allocation within the reservation window if we have a window */
+       if (my_rsv) {
+               group_first_block = ext4_group_first_block_no(sb, group);
+               if (my_rsv->_rsv_start >= group_first_block)
+                       start = my_rsv->_rsv_start - group_first_block;
+               else
+                       /* reservation window crosses group boundary */
+                       start = 0;
+               end = my_rsv->_rsv_end - group_first_block + 1;
+               if (end > EXT4_BLOCKS_PER_GROUP(sb))
+                       /* reservation window crosses group boundary */
+                       end = EXT4_BLOCKS_PER_GROUP(sb);
+               if ((start <= grp_goal) && (grp_goal < end))
+                       start = grp_goal;
+               else
+                       grp_goal = -1;
+       } else {
+               if (grp_goal > 0)
+                       start = grp_goal;
+               else
+                       start = 0;
+               end = EXT4_BLOCKS_PER_GROUP(sb);
+       }
+
+       BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));
+
+repeat:
+       if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
+               grp_goal = find_next_usable_block(start, bitmap_bh, end);
+               if (grp_goal < 0)
+                       goto fail_access;
+               if (!my_rsv) {
+                       int i;
+
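+                       /*
+                        * walk grp_goal back over up to 7 immediately
+                        * preceding free bits, so the claimed extent
+                        * starts at the beginning of a free run
+                        */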
+                       for (i = 0; i < 7 && grp_goal > start &&
+                                       ext4_test_allocatable(grp_goal - 1,
+                                                               bitmap_bh);
+                                       i++, grp_goal--)
+                               ;
+               }
+       }
+       start = grp_goal;
+
+       if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
+               grp_goal, bitmap_bh)) {
+               /*
+                * The block was allocated by another thread, or it was
+                * allocated and then freed by another thread
+                */
+               start++;
+               grp_goal++;
+               if (start >= end)
+                       goto fail_access;
+               goto repeat;
+       }
+       num++;
+       grp_goal++;
+       while (num < *count && grp_goal < end
+               && ext4_test_allocatable(grp_goal, bitmap_bh)
+               && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
+                               grp_goal, bitmap_bh)) {
+               num++;
+               grp_goal++;
+       }
+       *count = num;
+       return grp_goal - num;
+fail_access:
+       *count = num;
+       return -1;
+}
+
+/**
+ *     find_next_reservable_window():
+ *             find a reservable space within the given range.
+ *             It does not allocate the reservation window for now:
+ *             alloc_new_reservation() will do the work later.
+ *
+ *     @search_head: the head of the searching list;
+ *             This is not necessarily the list head of the whole filesystem
+ *
+ *             We have both head and start_block to assist the search
+ *             for the reservable space. The list starts from head,
+ *             but we will shift to the place where start_block is,
+ *             then start from there, when looking for a reservable space.
+ *
+ *     @size: the target new reservation window size
+ *
+ *     @group_first_block: the first block we consider to start
+ *                     the real search from
+ *
+ *     @last_block:
+ *             the maximum block number that our goal reservable space
+ *             could start from. This is normally the last block in this
+ *             group. The search ends when the start of the next possible
+ *             reservable space is beyond this boundary.
+ *             This handles reservation window requests that cross a
+ *             group boundary.
+ *
+ *     Basically, we search the given range (start_block, last_block),
+ *     rather than the whole reservation red-black tree, for a free
+ *     region that is of the requested size and has not been reserved.
+ *
+ */
+static int find_next_reservable_window(
+                               struct ext4_reserve_window_node *search_head,
+                               struct ext4_reserve_window_node *my_rsv,
+                               struct super_block * sb,
+                               ext4_fsblk_t start_block,
+                               ext4_fsblk_t last_block)
+{
+       struct rb_node *next;
+       struct ext4_reserve_window_node *rsv, *prev;
+       ext4_fsblk_t cur;
+       int size = my_rsv->rsv_goal_size;
+
+       /* TODO: make the start of the reservation window byte-aligned */
+       /* cur = *start_block & ~7;*/
+       cur = start_block;
+       rsv = search_head;
+       if (!rsv)
+               return -1;
+
+       while (1) {
+               if (cur <= rsv->rsv_end)
+                       cur = rsv->rsv_end + 1;
+
+               /* TODO?
+                * If we cannot find a reservable space of the expected
+                * size, we could remember during the search the largest
+                * reservable space we saw and return that one instead.
+                *
+                * For now we simply fail if we cannot find a reservable
+                * space of the expected size (or more)...
+                */
+               if (cur > last_block)
+                       return -1;              /* fail */
+
+               prev = rsv;
+               next = rb_next(&rsv->rsv_node);
+               rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node);
+
+               /*
+                * Reached the last reservation, we can just append to the
+                * previous one.
+                */
+               if (!next)
+                       break;
+
+               if (cur + size <= rsv->rsv_start) {
+                       /*
+                        * Found a reservable space big enough.  We could
+                        * have a reservation across the group boundary here
+                        */
+                       break;
+               }
+       }
+       /*
+        * We get here either when we have reached the end of the whole
+        * list and there is reservable space after the last entry, in
+        * which case we append the new window at the end of the list;
+        *
+        * or when we have found a reservable space in the middle of the
+        * list, in which case we reserve the window that we can append to.
+        */
+
+       if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
+               rsv_window_remove(sb, my_rsv);
+
+       /*
+        * Let's book the whole available window for now.  We will check the
+        * disk bitmap later and then, if there are free blocks, we adjust
+        * the window size if it's larger than requested.
+        * Otherwise, we will remove this node from the tree the next time
+        * find_next_reservable_window() is called.
+        */
+       my_rsv->rsv_start = cur;
+       my_rsv->rsv_end = cur + size - 1;
+       my_rsv->rsv_alloc_hit = 0;
+
+       if (prev != my_rsv)
+               ext4_rsv_window_add(sb, my_rsv);
+
+       return 0;
+}
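+
+/*
+ * To illustrate the search above: with existing windows covering
+ * blocks [0,9] and [50,59], a request of size 16 starting at block 5
+ * first skips past [0,9] (cur becomes 10), sees that 10 + 16 <= 50,
+ * and books [10,25] as the new window between the two.
+ */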
+
+/**
+ *     alloc_new_reservation()--allocate a new reservation window
+ *
+ *             To make a new reservation, we search the part of the
+ *             filesystem reservation list that lies inside the group. We
+ *             try to allocate a new reservation window near the
+ *             allocation goal, or at the beginning of the group if there
+ *             is no goal.
+ *
+ *             We first find a reservable space after the goal, then from
+ *             there we check the bitmap for the first free block after
+ *             it. If there is no free block until the end of the group,
+ *             the whole group is full and we fail. Otherwise, we check
+ *             whether the free block is inside the expected reservable
+ *             space; if so, we succeed.
+ *             If the first free block is outside the reservable space, we
+ *             start from that free block, search for the next available
+ *             space, and go on.
+ *
+ *     On success, a new reservation will be found and inserted into the
+ *     list.  It contains at least one free block, and it does not overlap
+ *     with other reservation windows.
+ *
+ *     On failure, we failed to find a reservation window in this group.
+ *
+ *     @my_rsv: the reservation window
+ *
+ *     @grp_goal: The goal (group-relative).  It is where the search for a
+ *             free reservable space should start from.
+ *             If we have a grp_goal (grp_goal > 0), we start from there;
+ *             with no grp_goal (grp_goal == -1), we start from the first
+ *             block of the group.
+ *
+ *     @sb: the super block
+ *     @group: the group we are trying to allocate in
+ *     @bitmap_bh: the block group block bitmap
+ *
+ */
+static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
+               ext4_grpblk_t grp_goal, struct super_block *sb,
+               unsigned int group, struct buffer_head *bitmap_bh)
+{
+       struct ext4_reserve_window_node *search_head;
+       ext4_fsblk_t group_first_block, group_end_block, start_block;
+       ext4_grpblk_t first_free_block;
+       struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
+       unsigned long size;
+       int ret;
+       spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
+
+       group_first_block = ext4_group_first_block_no(sb, group);
+       group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
+
+       if (grp_goal < 0)
+               start_block = group_first_block;
+       else
+               start_block = grp_goal + group_first_block;
+
+       size = my_rsv->rsv_goal_size;
+
+       if (!rsv_is_empty(&my_rsv->rsv_window)) {
+               /*
+                * If the old reservation crosses a group boundary
+                * and the goal is inside the old reservation window,
+                * we will come here when we just failed to allocate from
+                * the first part of the window. We still have another part
+                * that belongs to the next group. In this case, there is no
+                * point in discarding our window and trying to allocate a
+                * new one in this group (which will fail). We should
+                * keep the reservation window and simply move on.
+                *
+                * Maybe we could shift the start block of the reservation
+                * window to the first block of next group.
+                */
+
+               if ((my_rsv->rsv_start <= group_end_block) &&
+                               (my_rsv->rsv_end > group_end_block) &&
+                               (start_block >= my_rsv->rsv_start))
+                       return -1;
+
+               if ((my_rsv->rsv_alloc_hit >
+                    (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
+                       /*
+                        * if the previous allocation hit ratio is
+                        * greater than 1/2, then we double the size of
+                        * the reservation window the next time,
+                        * otherwise we keep the same size window
+                        */
+                       size = size * 2;
+                       if (size > EXT4_MAX_RESERVE_BLOCKS)
+                               size = EXT4_MAX_RESERVE_BLOCKS;
+                       my_rsv->rsv_goal_size = size;
+               }
+       }
+
+       spin_lock(rsv_lock);
+       /*
+        * shift the search start to the window near the goal block
+        */
+       search_head = search_reserve_window(fs_rsv_root, start_block);
+
+       /*
+        * find_next_reservable_window() simply finds a reservable window
+        * inside the given range(start_block, group_end_block).
+        *
+        * To make sure the reservation window has a free bit inside it, we
+        * need to check the bitmap after we found a reservable window.
+        */
+retry:
+       ret = find_next_reservable_window(search_head, my_rsv, sb,
+                                               start_block, group_end_block);
+
+       if (ret == -1) {
+               if (!rsv_is_empty(&my_rsv->rsv_window))
+                       rsv_window_remove(sb, my_rsv);
+               spin_unlock(rsv_lock);
+               return -1;
+       }
+
+       /*
+        * On success, find_next_reservable_window() returns the
+        * reservation window where there is a reservable space after it.
+        * Before we reserve this reservable space, we need
+        * to make sure there is at least a free block inside this region.
+        *
+        * Search for the first free bit, alternating between the block
+        * bitmap and the copy of the last committed bitmap, until we find
+        * an allocatable block. The search starts from the first block of
+        * the reservable space we just found.
+        */
+       spin_unlock(rsv_lock);
+       first_free_block = bitmap_search_next_usable_block(
+                       my_rsv->rsv_start - group_first_block,
+                       bitmap_bh, group_end_block - group_first_block + 1);
+
+       if (first_free_block < 0) {
+               /*
+                * no free block left on the bitmap, no point
+                * to reserve the space. return failed.
+                */
+               spin_lock(rsv_lock);
+               if (!rsv_is_empty(&my_rsv->rsv_window))
+                       rsv_window_remove(sb, my_rsv);
+               spin_unlock(rsv_lock);
+               return -1;              /* failed */
+       }
+
+       start_block = first_free_block + group_first_block;
+       /*
+        * check if the first free block is within the
+        * free space we just reserved
+        */
+       if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
+               return 0;               /* success */
+       /*
+        * if the first free bit we found is out of the reservable space
+        * continue search for next reservable space,
+        * start from where the free block is,
+        * we also shift the list head to where we stopped last time
+        */
+       search_head = my_rsv;
+       spin_lock(rsv_lock);
+       goto retry;
+}
+
+/**
+ * try_to_extend_reservation()
+ * @my_rsv:            given reservation window
+ * @sb:                        super block
+ * @size:              the delta to extend
+ *
+ * Attempt to expand the reservation window so that it is large enough
+ * to hold the required number of free blocks.
+ *
+ * Since ext4_try_to_allocate() will always allocate blocks within
+ * the reservation window range, if the window size is too small,
+ * a multiple-block allocation has to stop at the end of the reservation
+ * window. To make this more efficient, given the total number of
+ * blocks needed and the current size of the window, we try to
+ * expand the reservation window size if necessary on a best-effort
+ * basis before ext4_new_blocks() tries to allocate blocks.
+ */
+static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
+                       struct super_block *sb, int size)
+{
+       struct ext4_reserve_window_node *next_rsv;
+       struct rb_node *next;
+       spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
+
+       if (!spin_trylock(rsv_lock))
+               return;
+
+       next = rb_next(&my_rsv->rsv_node);
+
+       if (!next)
+               my_rsv->rsv_end += size;
+       else {
+               next_rsv = list_entry(next, struct ext4_reserve_window_node, rsv_node);
+
+               if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
+                       my_rsv->rsv_end += size;
+               else
+                       my_rsv->rsv_end = next_rsv->rsv_start - 1;
+       }
+       spin_unlock(rsv_lock);
+}
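+
+/*
+ * E.g. if my_rsv covers [100,109] and 20 more blocks are needed: with
+ * the next window starting at 150 the gap (150 - 109 - 1 == 40) is big
+ * enough, so rsv_end grows to 129; with the next window starting at
+ * 120 the gap is only 10, so rsv_end is capped at 119.
+ */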
+
+/**
+ * ext4_try_to_allocate_with_rsv()
+ * @sb:                        superblock
+ * @handle:            handle to this transaction
+ * @group:             given allocation block group
+ * @bitmap_bh:         bufferhead holds the block bitmap
+ * @grp_goal:          given target block within the group
+ * @count:             target number of blocks to allocate
+ * @my_rsv:            reservation window
+ * @errp:              pointer to store the error code
+ *
+ * This is the main function used to allocate a new block and its reservation
+ * window.
+ *
+ * Each time a new block allocation is needed, we first try to allocate from
+ * the inode's own reservation.  If it does not have a reservation window,
+ * then instead of first looking for a free bit in the bitmap and then looking
+ * up the reservation list to see whether that bit is inside somebody else's
+ * window, we try to allocate a reservation window for the inode starting
+ * from the goal.  The block allocation is then done within that window.
+ *
+ * This will avoid keeping on searching the reservation list again and
+ * again when somebody is looking for a free block (without
+ * reservation), and there are lots of free blocks, but they are all
+ * being reserved.
+ *
+ * We use a red-black tree for the per-filesystem reservation list.
+ *
+ */
+static ext4_grpblk_t
+ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
+                       unsigned int group, struct buffer_head *bitmap_bh,
+                       ext4_grpblk_t grp_goal,
+                       struct ext4_reserve_window_node * my_rsv,
+                       unsigned long *count, int *errp)
+{
+       ext4_fsblk_t group_first_block, group_last_block;
+       ext4_grpblk_t ret = 0;
+       int fatal;
+       unsigned long num = *count;
+
+       *errp = 0;
+
+       /*
+        * Make sure we use undo access for the bitmap, because it is critical
+        * that we do the frozen_data COW on bitmap buffers in all cases even
+        * if the buffer is in BJ_Forget state in the committing transaction.
+        */
+       BUFFER_TRACE(bitmap_bh, "get undo access for new block");
+       fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
+       if (fatal) {
+               *errp = fatal;
+               return -1;
+       }
+
+       /*
+        * We don't deal with reservations when the filesystem is mounted
+        * without reservations, the file is not a regular file, or the
+        * last attempt to allocate a block with reservations turned on
+        * failed.
+        */
+       if (my_rsv == NULL) {
+               ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
+                                               grp_goal, count, NULL);
+               goto out;
+       }
+       /*
+        * grp_goal is a group-relative block number (if there is a goal):
+        * 0 < grp_goal < EXT4_BLOCKS_PER_GROUP(sb).
+        * group_first_block is a filesystem-wide block number; it is the
+        * number of the first block in this group.
+        */
+       group_first_block = ext4_group_first_block_no(sb, group);
+       group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
+
+       /*
+        * Basically we will allocate a new block from the inode's reservation
+        * window.
+        *
+        * We need to allocate a new reservation window if:
+        * a) the inode does not have a reservation window; or
+        * b) the last attempt to allocate a block from the existing
+        *    reservation failed; or
+        * c) we came here with a goal and the goal is outside the window.
+        *
+        * We do not need a new reservation window if we come here at the
+        * beginning with a goal that is already inside the window, or if we
+        * have no goal but already have a window; in either case we can
+        * allocate from the reservation window directly.
+        */
+       while (1) {
+               if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
+                       !goal_in_my_reservation(&my_rsv->rsv_window,
+                                               grp_goal, group, sb)) {
+                       if (my_rsv->rsv_goal_size < *count)
+                               my_rsv->rsv_goal_size = *count;
+                       ret = alloc_new_reservation(my_rsv, grp_goal, sb,
+                                                       group, bitmap_bh);
+                       if (ret < 0)
+                               break;                  /* failed */
+
+                       if (!goal_in_my_reservation(&my_rsv->rsv_window,
+                                                       grp_goal, group, sb))
+                               grp_goal = -1;
+               } else if (grp_goal > 0 &&
+                         (my_rsv->rsv_end-grp_goal+1) < *count)
+                       try_to_extend_reservation(my_rsv, sb,
+                                       *count-my_rsv->rsv_end + grp_goal - 1);
+
+               if ((my_rsv->rsv_start > group_last_block) ||
+                               (my_rsv->rsv_end < group_first_block)) {
+                       rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
+                       BUG();
+               }
+               ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
+                                          grp_goal, &num, &my_rsv->rsv_window);
+               if (ret >= 0) {
+                       my_rsv->rsv_alloc_hit += num;
+                       *count = num;
+                       break;                          /* succeed */
+               }
+               num = *count;
+       }
+out:
+       if (ret >= 0) {
+               BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
+                                       "bitmap block");
+               fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
+               if (fatal) {
+                       *errp = fatal;
+                       return -1;
+               }
+               return ret;
+       }
+
+       BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
+       ext4_journal_release_buffer(handle, bitmap_bh);
+       return ret;
+}
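+
+/*
+ * In outline, the loop above does:
+ *
+ *     while (1) {
+ *             if (no usable window for this goal)
+ *                     allocate a new window near the goal (stop on failure);
+ *             else if (the window is too small for the request)
+ *                     try to extend it;
+ *             try to allocate inside the window (stop on success);
+ *     }
+ */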
+
+/**
+ * ext4_has_free_blocks()
+ * @sbi:               in-core super block structure.
+ *
+ * Check if filesystem has at least 1 free block available for allocation.
+ */
+static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
+{
+       ext4_fsblk_t free_blocks, root_blocks;
+
+       free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+       root_blocks = ext4_r_blocks_count(sbi->s_es);
+       if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+               sbi->s_resuid != current->fsuid &&
+               (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
+               return 0;
+       }
+       return 1;
+}
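+
+/*
+ * In other words: with r reserved blocks (ext4_r_blocks_count), an
+ * unprivileged allocation is refused once fewer than r + 1 free blocks
+ * remain, so the last r blocks stay usable only by CAP_SYS_RESOURCE
+ * holders, the resuid owner, or the resgid group.
+ */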
+
+/**
+ * ext4_should_retry_alloc()
+ * @sb:                        super block
+ * @retries:           number of attempts that have been made
+ *
+ * ext4_should_retry_alloc() is called when ENOSPC is returned.  If it is
+ * profitable to retry the operation, this function will wait for the
+ * current or committing transaction to complete and then return TRUE.
+ *
+ * If the total number of retries exceeds three, it returns FALSE.
+ */
+int ext4_should_retry_alloc(struct super_block *sb, int *retries)
+{
+       if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
+               return 0;
+
+       jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+
+       return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+}
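+
+/*
+ * A typical caller pattern (sketch only; "attempt_allocation" stands in
+ * for whatever operation returned ENOSPC):
+ *
+ *     int retries = 0;
+ * retry:
+ *     err = attempt_allocation(handle, inode, ...);
+ *     if (err == -ENOSPC &&
+ *         ext4_should_retry_alloc(inode->i_sb, &retries))
+ *             goto retry;
+ */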
+
+/**
+ * ext4_new_blocks() -- core block(s) allocation function
+ * @handle:            handle to this transaction
+ * @inode:             file inode
+ * @goal:              given target block(filesystem wide)
+ * @count:             target number of blocks to allocate
+ * @errp:              error code
+ *
+ * ext4_new_blocks uses a goal block to assist allocation.  It first tries
+ * to allocate block(s) from the block group that contains the goal block.
+ * If that fails, it will try to allocate block(s) from other block groups
+ * without any specific goal block.
+ *
+ */
+ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t goal, unsigned long *count, int *errp)
+{
+       struct buffer_head *bitmap_bh = NULL;
+       struct buffer_head *gdp_bh;
+       unsigned long group_no;
+       int goal_group;
+       ext4_grpblk_t grp_target_blk;   /* blockgroup-relative goal block */
+       ext4_grpblk_t grp_alloc_blk;    /* blockgroup-relative allocated block */
+       ext4_fsblk_t ret_block;         /* filesystem-wide allocated block */
+       int bgi;                        /* blockgroup iteration index */
+       int fatal = 0, err;
+       int performed_allocation = 0;
+       ext4_grpblk_t free_blocks;      /* number of free blocks in a group */
+       struct super_block *sb;
+       struct ext4_group_desc *gdp;
+       struct ext4_super_block *es;
+       struct ext4_sb_info *sbi;
+       struct ext4_reserve_window_node *my_rsv = NULL;
+       struct ext4_block_alloc_info *block_i;
+       unsigned short windowsz = 0;
+#ifdef EXT4FS_DEBUG
+       static int goal_hits, goal_attempts;
+#endif
+       unsigned long ngroups;
+       unsigned long num = *count;
+
+       *errp = -ENOSPC;
+       sb = inode->i_sb;
+       if (!sb) {
+               printk("ext4_new_block: nonexistent device\n");
+               return 0;
+       }
+
+       /*
+        * Check quota for allocation of this block.
+        */
+       if (DQUOT_ALLOC_BLOCK(inode, num)) {
+               *errp = -EDQUOT;
+               return 0;
+       }
+
+       sbi = EXT4_SB(sb);
+       es = EXT4_SB(sb)->s_es;
+       ext4_debug("goal=%llu.\n", goal);
+       /*
+        * Allocate a block from the reservation only when the filesystem
+        * is mounted with reservations (the default, -o reservation), it
+        * is a regular file, and the desired window size is greater than
+        * 0 (one could use the ioctl command EXT4_IOC_SETRSVSZ to set the
+        * window size to 0 to turn off reservations on that particular
+        * file)
+        */
+       block_i = EXT4_I(inode)->i_block_alloc_info;
+       if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
+               my_rsv = &block_i->rsv_window_node;
+
+       if (!ext4_has_free_blocks(sbi)) {
+               *errp = -ENOSPC;
+               goto out;
+       }
+
+       /*
+        * First, test whether the goal block is free.
+        */
+       if (goal < le32_to_cpu(es->s_first_data_block) ||
+           goal >= ext4_blocks_count(es))
+               goal = le32_to_cpu(es->s_first_data_block);
+       ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
+       goal_group = group_no;
+retry_alloc:
+       gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
+       if (!gdp)
+               goto io_error;
+
+       free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+       /*
+        * if there are not enough free blocks to make a new reservation,
+        * turn off reservations for this allocation
+        */
+       if (my_rsv && (free_blocks < windowsz)
+               && (rsv_is_empty(&my_rsv->rsv_window)))
+               my_rsv = NULL;
+
+       if (free_blocks > 0) {
+               bitmap_bh = read_block_bitmap(sb, group_no);
+               if (!bitmap_bh)
+                       goto io_error;
+               grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
+                                       group_no, bitmap_bh, grp_target_blk,
+                                       my_rsv, &num, &fatal);
+               if (fatal)
+                       goto out;
+               if (grp_alloc_blk >= 0)
+                       goto allocated;
+       }
+
+       ngroups = EXT4_SB(sb)->s_groups_count;
+       smp_rmb();
+
+       /*
+        * Now search the rest of the groups.  We assume that
+        * group_no and gdp correctly point to the last group visited.
+        */
+       for (bgi = 0; bgi < ngroups; bgi++) {
+               group_no++;
+               if (group_no >= ngroups)
+                       group_no = 0;
+               gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
+               if (!gdp) {
+                       *errp = -EIO;
+                       goto out;
+               }
+               free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+               /*
+                * skip this group if the number of
+                * free blocks is less than half of the reservation
+                * window size.
+                */
+               if (free_blocks <= (windowsz/2))
+                       continue;
+
+               brelse(bitmap_bh);
+               bitmap_bh = read_block_bitmap(sb, group_no);
+               if (!bitmap_bh)
+                       goto io_error;
+               /*
+                * try to allocate block(s) from this group, without a goal (-1).
+                */
+               grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
+                                       group_no, bitmap_bh, -1, my_rsv,
+                                       &num, &fatal);
+               if (fatal)
+                       goto out;
+               if (grp_alloc_blk >= 0)
+                       goto allocated;
+       }
+       /*
+        * We may end up with a bogus earlier ENOSPC error because the
+        * filesystem is "full" of reservations while there may indeed be
+        * free blocks available on disk.  In this case, we just forget
+        * about the reservations and do the block allocation without them.
+        */
+       if (my_rsv) {
+               my_rsv = NULL;
+               group_no = goal_group;
+               goto retry_alloc;
+       }
+       /* No space left on the device */
+       *errp = -ENOSPC;
+       goto out;
+
+allocated:
+
+       ext4_debug("using block group %lu(%d)\n",
+                       group_no, le16_to_cpu(gdp->bg_free_blocks_count));
+
+       BUFFER_TRACE(gdp_bh, "get_write_access");
+       fatal = ext4_journal_get_write_access(handle, gdp_bh);
+       if (fatal)
+               goto out;
+
+       ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);
+
+       if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
+           in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
+           in_range(ret_block, ext4_inode_table(sb, gdp),
+                    EXT4_SB(sb)->s_itb_per_group) ||
+           in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
+                    EXT4_SB(sb)->s_itb_per_group))
+               ext4_error(sb, "ext4_new_block",
+                           "Allocating block in system zone - "
+                           "blocks from %llu, length %lu",
+                            ret_block, num);
+
+       performed_allocation = 1;
+
+#ifdef CONFIG_JBD_DEBUG
+       {
+               struct buffer_head *debug_bh;
+
+               /* Record bitmap buffer state in the newly allocated block */
+               debug_bh = sb_find_get_block(sb, ret_block);
+               if (debug_bh) {
+                       BUFFER_TRACE(debug_bh, "state when allocated");
+                       BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
+                       brelse(debug_bh);
+               }
+       }
+       jbd_lock_bh_state(bitmap_bh);
+       spin_lock(sb_bgl_lock(sbi, group_no));
+       if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
+               int i;
+
+               for (i = 0; i < num; i++) {
+                       if (ext4_test_bit(grp_alloc_blk+i,
+                                       bh2jh(bitmap_bh)->b_committed_data)) {
+                               printk("%s: block was unexpectedly set in "
+                                       "b_committed_data\n", __FUNCTION__);
+                       }
+               }
+       }
+       ext4_debug("found bit %d\n", grp_alloc_blk);
+       spin_unlock(sb_bgl_lock(sbi, group_no));
+       jbd_unlock_bh_state(bitmap_bh);
+#endif
+
+       if (ret_block + num - 1 >= ext4_blocks_count(es)) {
+               ext4_error(sb, "ext4_new_block",
+                           "block(%llu) >= blocks count(%llu) - "
+                           "block_group = %lu, es == %p ", ret_block,
+                       ext4_blocks_count(es), group_no, es);
+               goto out;
+       }
+
+       /*
+        * It is up to the caller to add the new buffer to a journal
+        * list of some description.  We don't know in advance whether
+        * the caller wants to use it as metadata or data.
+        */
+       ext4_debug("allocating block %llu. Goal hits %d of %d.\n",
+                       ret_block, goal_hits, goal_attempts);
+
+       spin_lock(sb_bgl_lock(sbi, group_no));
+       gdp->bg_free_blocks_count =
+                       cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
+       spin_unlock(sb_bgl_lock(sbi, group_no));
+       percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
+
+       BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
+       err = ext4_journal_dirty_metadata(handle, gdp_bh);
+       if (!fatal)
+               fatal = err;
+
+       sb->s_dirt = 1;
+       if (fatal)
+               goto out;
+
+       *errp = 0;
+       brelse(bitmap_bh);
+       DQUOT_FREE_BLOCK(inode, *count-num);
+       *count = num;
+       return ret_block;
+
+io_error:
+       *errp = -EIO;
+out:
+       if (fatal) {
+               *errp = fatal;
+               ext4_std_error(sb, fatal);
+       }
+       /*
+        * Undo the block allocation
+        */
+       if (!performed_allocation)
+               DQUOT_FREE_BLOCK(inode, *count);
+       brelse(bitmap_bh);
+       return 0;
+}
+
+ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t goal, int *errp)
+{
+       unsigned long count = 1;
+
+       return ext4_new_blocks(handle, inode, goal, &count, errp);
+}
+
+/**
+ * ext4_count_free_blocks() -- count filesystem free blocks
+ * @sb:                superblock
+ *
+ * Adds up the number of free blocks from each block group.
+ */
+ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
+{
+       ext4_fsblk_t desc_count;
+       struct ext4_group_desc *gdp;
+       int i;
+       unsigned long ngroups = EXT4_SB(sb)->s_groups_count;
+#ifdef EXT4FS_DEBUG
+       struct ext4_super_block *es;
+       ext4_fsblk_t bitmap_count;
+       unsigned long x;
+       struct buffer_head *bitmap_bh = NULL;
+
+       es = EXT4_SB(sb)->s_es;
+       desc_count = 0;
+       bitmap_count = 0;
+       gdp = NULL;
+
+       smp_rmb();
+       for (i = 0; i < ngroups; i++) {
+               gdp = ext4_get_group_desc(sb, i, NULL);
+               if (!gdp)
+                       continue;
+               desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+               brelse(bitmap_bh);
+               bitmap_bh = read_block_bitmap(sb, i);
+               if (bitmap_bh == NULL)
+                       continue;
+
+               x = ext4_count_free(bitmap_bh, sb->s_blocksize);
+               printk("group %d: stored = %d, counted = %lu\n",
+                       i, le16_to_cpu(gdp->bg_free_blocks_count), x);
+               bitmap_count += x;
+       }
+       brelse(bitmap_bh);
+       printk("ext4_count_free_blocks: stored = %llu"
+               ", computed = %llu, %llu\n",
+              EXT4_FREE_BLOCKS_COUNT(es),
+               desc_count, bitmap_count);
+       return bitmap_count;
+#else
+       desc_count = 0;
+       smp_rmb();
+       for (i = 0; i < ngroups; i++) {
+               gdp = ext4_get_group_desc(sb, i, NULL);
+               if (!gdp)
+                       continue;
+               desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+       }
+
+       return desc_count;
+#endif
+}
+
+static inline int
+block_in_use(ext4_fsblk_t block, struct super_block *sb, unsigned char *map)
+{
+       ext4_grpblk_t offset;
+
+       ext4_get_group_no_and_offset(sb, block, NULL, &offset);
+       return ext4_test_bit (offset, map);
+}
+
+static inline int test_root(int a, int b)
+{
+       int num = b;
+
+       while (a > num)
+               num *= b;
+       return num == a;
+}
+
+static int ext4_group_sparse(int group)
+{
+       if (group <= 1)
+               return 1;
+       if (!(group & 1))
+               return 0;
+       return (test_root(group, 7) || test_root(group, 5) ||
+               test_root(group, 3));
+}
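+
+/*
+ * Worked example: test_root(a, b) multiplies b by itself until it
+ * reaches or passes a, so it returns true exactly when a is a power
+ * of b (test_root(49, 7) -> 49 == 49, test_root(21, 7) -> 49 != 21).
+ * ext4_group_sparse() therefore accepts groups 0 and 1 plus the
+ * powers of 3, 5 and 7: 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
+ */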
+
+/**
+ *     ext4_bg_has_super - number of blocks used by the superblock in group
+ *     @sb: superblock for filesystem
+ *     @group: group number to check
+ *
+ *     Return the number of blocks used by the superblock (primary or backup)
+ *     in this group.  Currently this will be only 0 or 1.
+ */
+int ext4_bg_has_super(struct super_block *sb, int group)
+{
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                               EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
+                       !ext4_group_sparse(group))
+               return 0;
+       return 1;
+}
+
+static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group)
+{
+       unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
+       unsigned long first = metagroup * EXT4_DESC_PER_BLOCK(sb);
+       unsigned long last = first + EXT4_DESC_PER_BLOCK(sb) - 1;
+
+       if (group == first || group == first + 1 || group == last)
+               return 1;
+       return 0;
+}
+
+static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group)
+{
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                               EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
+                       !ext4_group_sparse(group))
+               return 0;
+       return EXT4_SB(sb)->s_gdb_count;
+}
+
+/**
+ *     ext4_bg_num_gdb - number of blocks used by the group table in group
+ *     @sb: superblock for filesystem
+ *     @group: group number to check
+ *
+ *     Return the number of blocks used by the group descriptor table
+ *     (primary or backup) in this group.  In the future there may be a
+ *     different number of descriptor blocks in each group.
+ */
+unsigned long ext4_bg_num_gdb(struct super_block *sb, int group)
+{
+       unsigned long first_meta_bg =
+                       le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
+       unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
+
+       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
+                       metagroup < first_meta_bg)
+               return ext4_bg_num_gdb_nometa(sb, group);
+
+       return ext4_bg_num_gdb_meta(sb, group);
+}
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
new file mode 100644 (file)
index 0000000..11e93c1
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ *  linux/fs/ext4/bitmap.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+
+#ifdef EXT4FS_DEBUG
+
+static int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
+
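+/*
+ * nibblemap[x] is the number of zero bits in the 4-bit value x, so free
+ * blocks are counted a nibble at a time.  For a map byte of 0x5a, the
+ * low nibble 0xa (binary 1010) and the high nibble 0x5 (binary 0101)
+ * each contain two zero bits, so that byte contributes
+ * nibblemap[0xa] + nibblemap[0x5] = 4 free blocks to the sum.
+ */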
+unsigned long ext4_count_free (struct buffer_head * map, unsigned int numchars)
+{
+       unsigned int i;
+       unsigned long sum = 0;
+
+       if (!map)
+               return (0);
+       for (i = 0; i < numchars; i++)
+               sum += nibblemap[map->b_data[i] & 0xf] +
+                       nibblemap[(map->b_data[i] >> 4) & 0xf];
+       return (sum);
+}
+
+#endif  /*  EXT4FS_DEBUG  */
+
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
new file mode 100644 (file)
index 0000000..f859578
--- /dev/null
@@ -0,0 +1,518 @@
+/*
+ *  linux/fs/ext4/dir.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/dir.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext4 directory handling functions
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *
+ * Hash Tree Directory indexing (c) 2001  Daniel Phillips
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+
+static unsigned char ext4_filetype_table[] = {
+       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+};
+
+static int ext4_readdir(struct file *, void *, filldir_t);
+static int ext4_dx_readdir(struct file * filp,
+                          void * dirent, filldir_t filldir);
+static int ext4_release_dir (struct inode * inode,
+                               struct file * filp);
+
+const struct file_operations ext4_dir_operations = {
+       .llseek         = generic_file_llseek,
+       .read           = generic_read_dir,
+       .readdir        = ext4_readdir,         /* we take BKL. needed? */
+       .ioctl          = ext4_ioctl,           /* BKL held */
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = ext4_compat_ioctl,
+#endif
+       .fsync          = ext4_sync_file,       /* BKL held */
+#ifdef CONFIG_EXT4_INDEX
+       .release        = ext4_release_dir,
+#endif
+};
+
+
+static unsigned char get_dtype(struct super_block *sb, int filetype)
+{
+       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
+           (filetype >= EXT4_FT_MAX))
+               return DT_UNKNOWN;
+
+       return (ext4_filetype_table[filetype]);
+}
+
+
+int ext4_check_dir_entry (const char * function, struct inode * dir,
+                         struct ext4_dir_entry_2 * de,
+                         struct buffer_head * bh,
+                         unsigned long offset)
+{
+       const char * error_msg = NULL;
+       const int rlen = le16_to_cpu(de->rec_len);
+
+       if (rlen < EXT4_DIR_REC_LEN(1))
+               error_msg = "rec_len is smaller than minimal";
+       else if (rlen % 4 != 0)
+               error_msg = "rec_len % 4 != 0";
+       else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
+               error_msg = "rec_len is too small for name_len";
+       else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+               error_msg = "directory entry across blocks";
+       else if (le32_to_cpu(de->inode) >
+                       le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
+               error_msg = "inode out of bounds";
+
+       if (error_msg != NULL)
+               ext4_error (dir->i_sb, function,
+                       "bad entry in directory #%lu: %s - "
+                       "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+                       dir->i_ino, error_msg, offset,
+                       (unsigned long) le32_to_cpu(de->inode),
+                       rlen, de->name_len);
+       return error_msg == NULL ? 1 : 0;
+}
+
+static int ext4_readdir(struct file * filp,
+                        void * dirent, filldir_t filldir)
+{
+       int error = 0;
+       unsigned long offset;
+       int i, stored;
+       struct ext4_dir_entry_2 *de;
+       struct super_block *sb;
+       int err;
+       struct inode *inode = filp->f_dentry->d_inode;
+       int ret = 0;
+
+       sb = inode->i_sb;
+
+#ifdef CONFIG_EXT4_INDEX
+       if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+                                   EXT4_FEATURE_COMPAT_DIR_INDEX) &&
+           ((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) ||
+            ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
+               err = ext4_dx_readdir(filp, dirent, filldir);
+               if (err != ERR_BAD_DX_DIR) {
+                       ret = err;
+                       goto out;
+               }
+               /*
+                * We don't set the inode dirty flag since it's not
+                * critical that it get flushed back to the disk.
+                */
+               EXT4_I(filp->f_dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
+       }
+#endif
+       stored = 0;
+       offset = filp->f_pos & (sb->s_blocksize - 1);
+
+       while (!error && !stored && filp->f_pos < inode->i_size) {
+               unsigned long blk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb);
+               struct buffer_head map_bh;
+               struct buffer_head *bh = NULL;
+
+               map_bh.b_state = 0;
+               err = ext4_get_blocks_wrap(NULL, inode, blk, 1, &map_bh, 0, 0);
+               if (err > 0) {
+                       page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
+                               &filp->f_ra,
+                               filp,
+                               map_bh.b_blocknr >>
+                                       (PAGE_CACHE_SHIFT - inode->i_blkbits),
+                               1);
+                       bh = ext4_bread(NULL, inode, blk, 0, &err);
+               }
+
+               /*
+                * We ignore I/O errors on directories so users have a chance
+                * of recovering data when there's a bad sector
+                */
+               if (!bh) {
+                       ext4_error (sb, "ext4_readdir",
+                               "directory #%lu contains a hole at offset %lu",
+                               inode->i_ino, (unsigned long)filp->f_pos);
+                       filp->f_pos += sb->s_blocksize - offset;
+                       continue;
+               }
+
+revalidate:
+               /* If the dir block has changed since the last call to
+                * readdir(2), then we might be pointing to an invalid
+                * dirent right now.  Scan from the start of the block
+                * to make sure. */
+               if (filp->f_version != inode->i_version) {
+                       for (i = 0; i < sb->s_blocksize && i < offset; ) {
+                               de = (struct ext4_dir_entry_2 *)
+                                       (bh->b_data + i);
+                               /* It's too expensive to do a full
+                                * dirent test each time round this
+                                * loop, but we do have to test at
+                                * least that it is non-zero.  A
+                                * failure will be detected in the
+                                * dirent test below. */
+                               if (le16_to_cpu(de->rec_len) <
+                                               EXT4_DIR_REC_LEN(1))
+                                       break;
+                               i += le16_to_cpu(de->rec_len);
+                       }
+                       offset = i;
+                       filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
+                               | offset;
+                       filp->f_version = inode->i_version;
+               }
+
+               while (!error && filp->f_pos < inode->i_size
+                      && offset < sb->s_blocksize) {
+                       de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
+                       if (!ext4_check_dir_entry ("ext4_readdir", inode, de,
+                                                  bh, offset)) {
+                               /*
+                                * On error, skip the f_pos to the next block
+                                */
+                               filp->f_pos = (filp->f_pos |
+                                               (sb->s_blocksize - 1)) + 1;
+                               brelse (bh);
+                               ret = stored;
+                               goto out;
+                       }
+                       offset += le16_to_cpu(de->rec_len);
+                       if (le32_to_cpu(de->inode)) {
+                               /* We might block in the next section
+                                * if the data destination is
+                                * currently swapped out.  So, use a
+                                * version stamp to detect whether or
+                                * not the directory has been modified
+                                * during the copy operation.
+                                */
+                               unsigned long version = filp->f_version;
+
+                               error = filldir(dirent, de->name,
+                                               de->name_len,
+                                               filp->f_pos,
+                                               le32_to_cpu(de->inode),
+                                               get_dtype(sb, de->file_type));
+                               if (error)
+                                       break;
+                               if (version != filp->f_version)
+                                       goto revalidate;
+                               stored++;
+                       }
+                       filp->f_pos += le16_to_cpu(de->rec_len);
+               }
+               offset = 0;
+               brelse (bh);
+       }
+out:
+       return ret;
+}
+
+#ifdef CONFIG_EXT4_INDEX
+/*
+ * These functions convert from the major/minor hash to an f_pos
+ * value.
+ *
+ * Currently we only use the major hash number.  This is unfortunate, but
+ * on 32-bit machines, the same VFS interface is used for lseek and
+ * llseek, so if we use the 64 bit offset, then the 32-bit versions of
+ * lseek/telldir/seekdir will blow out spectacularly, and from within
+ * the ext2 low-level routine, we don't know if we're being called by
+ * a 64-bit version of the system call or the 32-bit version of the
+ * system call.  Worse yet, NFSv2 only allows for a 32-bit readdir
+ * cookie.  Sigh.
+ */
+#define hash2pos(major, minor) ((major) >> 1)
+#define pos2maj_hash(pos)      (((pos) << 1) & 0xffffffff)
+#define pos2min_hash(pos)      (0)
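+
+/*
+ * Round-trip note: hash2pos() drops the low bit of the major hash, so
+ * pos2maj_hash(hash2pos(h, m)) == (h & ~1) and the minor hash is lost
+ * entirely; a telldir()/seekdir() cookie therefore only approximates
+ * the original hash position.
+ */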
+
+/*
+ * This structure holds the nodes of the red-black tree used to store
+ * the directory entry in hash order.
+ */
+struct fname {
+       __u32           hash;
+       __u32           minor_hash;
+       struct rb_node  rb_hash;
+       struct fname    *next;
+       __u32           inode;
+       __u8            name_len;
+       __u8            file_type;
+       char            name[0];
+};
+
+/*
+ * This function implements a non-recursive way of freeing all of the
+ * nodes in the red-black tree.
+ */
+static void free_rb_tree_fname(struct rb_root *root)
+{
+       struct rb_node  *n = root->rb_node;
+       struct rb_node  *parent;
+       struct fname    *fname;
+
+       while (n) {
+               /* Do the node's children first */
+               if ((n)->rb_left) {
+                       n = n->rb_left;
+                       continue;
+               }
+               if (n->rb_right) {
+                       n = n->rb_right;
+                       continue;
+               }
+               /*
+                * The node has no children; free it, and then zero
+                * out parent's link to it.  Finally go to the
+                * beginning of the loop and try to free the parent
+                * node.
+                */
+               parent = rb_parent(n);
+               fname = rb_entry(n, struct fname, rb_hash);
+               while (fname) {
+                       struct fname * old = fname;
+                       fname = fname->next;
+                       kfree (old);
+               }
+               if (!parent)
+                       root->rb_node = NULL;
+               else if (parent->rb_left == n)
+                       parent->rb_left = NULL;
+               else if (parent->rb_right == n)
+                       parent->rb_right = NULL;
+               n = parent;
+       }
+       root->rb_node = NULL;
+}
+
+
+static struct dir_private_info *create_dir_info(loff_t pos)
+{
+       struct dir_private_info *p;
+
+       p = kmalloc(sizeof(struct dir_private_info), GFP_KERNEL);
+       if (!p)
+               return NULL;
+       p->root.rb_node = NULL;
+       p->curr_node = NULL;
+       p->extra_fname = NULL;
+       p->last_pos = 0;
+       p->curr_hash = pos2maj_hash(pos);
+       p->curr_minor_hash = pos2min_hash(pos);
+       p->next_hash = 0;
+       return p;
+}
+
+void ext4_htree_free_dir_info(struct dir_private_info *p)
+{
+       free_rb_tree_fname(&p->root);
+       kfree(p);
+}
+
+/*
+ * Given a directory entry, enter it into the fname rb tree.
+ */
+int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
+                            __u32 minor_hash,
+                            struct ext4_dir_entry_2 *dirent)
+{
+       struct rb_node **p, *parent = NULL;
+       struct fname * fname, *new_fn;
+       struct dir_private_info *info;
+       int len;
+
+       info = (struct dir_private_info *) dir_file->private_data;
+       p = &info->root.rb_node;
+
+       /* Create and allocate the fname structure */
+       len = sizeof(struct fname) + dirent->name_len + 1;
+       new_fn = kzalloc(len, GFP_KERNEL);
+       if (!new_fn)
+               return -ENOMEM;
+       new_fn->hash = hash;
+       new_fn->minor_hash = minor_hash;
+       new_fn->inode = le32_to_cpu(dirent->inode);
+       new_fn->name_len = dirent->name_len;
+       new_fn->file_type = dirent->file_type;
+       memcpy(new_fn->name, dirent->name, dirent->name_len);
+       new_fn->name[dirent->name_len] = 0;
+
+       while (*p) {
+               parent = *p;
+               fname = rb_entry(parent, struct fname, rb_hash);
+
+               /*
+                * If the hash and minor hash match up, then we put
+                * them on a linked list.  This rarely happens...
+                */
+               if ((new_fn->hash == fname->hash) &&
+                   (new_fn->minor_hash == fname->minor_hash)) {
+                       new_fn->next = fname->next;
+                       fname->next = new_fn;
+                       return 0;
+               }
+
+               if (new_fn->hash < fname->hash)
+                       p = &(*p)->rb_left;
+               else if (new_fn->hash > fname->hash)
+                       p = &(*p)->rb_right;
+               else if (new_fn->minor_hash < fname->minor_hash)
+                       p = &(*p)->rb_left;
+               else /* if (new_fn->minor_hash > fname->minor_hash) */
+                       p = &(*p)->rb_right;
+       }
+
+       rb_link_node(&new_fn->rb_hash, parent, p);
+       rb_insert_color(&new_fn->rb_hash, &info->root);
+       return 0;
+}
+
+
+
+/*
+ * This is a helper function for ext4_dx_readdir.  It calls filldir
+ * for all entries on the fname linked list.  (Normally there is only
+ * one entry on the linked list, unless there are 62 bit hash collisions.)
+ */
+static int call_filldir(struct file * filp, void * dirent,
+                       filldir_t filldir, struct fname *fname)
+{
+       struct dir_private_info *info = filp->private_data;
+       loff_t  curr_pos;
+       struct inode *inode = filp->f_dentry->d_inode;
+       struct super_block * sb;
+       int error;
+
+       sb = inode->i_sb;
+
+       if (!fname) {
+               printk("call_filldir: called with null fname?!?\n");
+               return 0;
+       }
+       curr_pos = hash2pos(fname->hash, fname->minor_hash);
+       while (fname) {
+               error = filldir(dirent, fname->name,
+                               fname->name_len, curr_pos,
+                               fname->inode,
+                               get_dtype(sb, fname->file_type));
+               if (error) {
+                       filp->f_pos = curr_pos;
+                       info->extra_fname = fname->next;
+                       return error;
+               }
+               fname = fname->next;
+       }
+       return 0;
+}
+
+static int ext4_dx_readdir(struct file * filp,
+                        void * dirent, filldir_t filldir)
+{
+       struct dir_private_info *info = filp->private_data;
+       struct inode *inode = filp->f_dentry->d_inode;
+       struct fname *fname;
+       int     ret;
+
+       if (!info) {
+               info = create_dir_info(filp->f_pos);
+               if (!info)
+                       return -ENOMEM;
+               filp->private_data = info;
+       }
+
+       if (filp->f_pos == EXT4_HTREE_EOF)
+               return 0;       /* EOF */
+
+       /* Someone has messed with f_pos; reset the world */
+       if (info->last_pos != filp->f_pos) {
+               free_rb_tree_fname(&info->root);
+               info->curr_node = NULL;
+               info->extra_fname = NULL;
+               info->curr_hash = pos2maj_hash(filp->f_pos);
+               info->curr_minor_hash = pos2min_hash(filp->f_pos);
+       }
+
+       /*
+        * If there are any leftover names on the hash collision
+        * chain, return them first.
+        */
+       if (info->extra_fname &&
+           call_filldir(filp, dirent, filldir, info->extra_fname))
+               goto finished;
+
+       if (!info->curr_node)
+               info->curr_node = rb_first(&info->root);
+
+       while (1) {
+               /*
+                * Fill the rbtree if we have no more entries,
+                * or the inode has changed since we last read in the
+                * cached entries.
+                */
+               if ((!info->curr_node) ||
+                   (filp->f_version != inode->i_version)) {
+                       info->curr_node = NULL;
+                       free_rb_tree_fname(&info->root);
+                       filp->f_version = inode->i_version;
+                       ret = ext4_htree_fill_tree(filp, info->curr_hash,
+                                                  info->curr_minor_hash,
+                                                  &info->next_hash);
+                       if (ret < 0)
+                               return ret;
+                       if (ret == 0) {
+                               filp->f_pos = EXT4_HTREE_EOF;
+                               break;
+                       }
+                       info->curr_node = rb_first(&info->root);
+               }
+
+               fname = rb_entry(info->curr_node, struct fname, rb_hash);
+               info->curr_hash = fname->hash;
+               info->curr_minor_hash = fname->minor_hash;
+               if (call_filldir(filp, dirent, filldir, fname))
+                       break;
+
+               info->curr_node = rb_next(info->curr_node);
+               if (!info->curr_node) {
+                       if (info->next_hash == ~0) {
+                               filp->f_pos = EXT4_HTREE_EOF;
+                               break;
+                       }
+                       info->curr_hash = info->next_hash;
+                       info->curr_minor_hash = 0;
+               }
+       }
+finished:
+       info->last_pos = filp->f_pos;
+       return 0;
+}
+
+static int ext4_release_dir (struct inode * inode, struct file * filp)
+{
+       if (filp->private_data)
+               ext4_htree_free_dir_info(filp->private_data);
+
+       return 0;
+}
+
+#endif
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
new file mode 100644 (file)
index 0000000..2608dce
--- /dev/null
@@ -0,0 +1,2152 @@
+/*
+ * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
+ * Written by Alex Tomas <alex@clusterfs.com>
+ *
+ * Architecture independence:
+ *   Copyright (c) 2005, Bull S.A.
+ *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
+ */
+
+/*
+ * Extents support for EXT4
+ *
+ * TODO:
+ *   - ext4*_error() should be used in some situations
+ *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
+ *   - smart tree reduction
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/jbd2.h>
+#include <linux/smp_lock.h>
+#include <linux/highuid.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ext4_fs_extents.h>
+#include <asm/uaccess.h>
+
+
+/*
+ * ext_pblock:
+ * combine low and high parts of physical block number into ext4_fsblk_t
+ */
+static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
+{
+       ext4_fsblk_t block;
+
+       block = le32_to_cpu(ex->ee_start);
+       block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
+       return block;
+}
+
+/*
+ * idx_pblock:
+ * combine low and high parts of a leaf physical block number into ext4_fsblk_t
+ */
+static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
+{
+       ext4_fsblk_t block;
+
+       block = le32_to_cpu(ix->ei_leaf);
+       block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
+       return block;
+}
+
+/*
+ * ext4_ext_store_pblock:
+ * stores a large physical block number into an extent struct,
+ * breaking it into parts
+ */
+static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
+{
+       ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+       ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
+}
+
+/*
+ * ext4_idx_store_pblock:
+ * stores a large physical block number into an index struct,
+ * breaking it into parts
+ */
+static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
+{
+       ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
+       ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
+}
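+
+/*
+ * Split/combine example: for the 48-bit physical block 0x123456789a,
+ * the store helpers put the low 32 bits (0x3456789a) in ee_start or
+ * ei_leaf and the high bits ((pb >> 31) >> 1 == 0x12) in the _hi
+ * field; ext_pblock()/idx_pblock() then reassemble
+ * ((0x12ULL << 31) << 1) | 0x3456789a == 0x123456789a.
+ */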
+
+static int ext4_ext_check_header(const char *function, struct inode *inode,
+                               struct ext4_extent_header *eh)
+{
+       const char *error_msg = NULL;
+
+       if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
+               error_msg = "invalid magic";
+               goto corrupted;
+       }
+       if (unlikely(eh->eh_max == 0)) {
+               error_msg = "invalid eh_max";
+               goto corrupted;
+       }
+       if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
+               error_msg = "invalid eh_entries";
+               goto corrupted;
+       }
+       return 0;
+
+corrupted:
+       ext4_error(inode->i_sb, function,
+                       "bad header in inode #%lu: %s - magic %x, "
+                       "entries %u, max %u, depth %u",
+                       inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
+                       le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
+                       le16_to_cpu(eh->eh_depth));
+
+       return -EIO;
+}
+
+static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
+{
+       int err;
+
+       if (handle->h_buffer_credits > needed)
+               return handle;
+       if (!ext4_journal_extend(handle, needed))
+               return handle;
+       err = ext4_journal_restart(handle, needed);
+
+       return handle;
+}
+
+/*
+ * could return:
+ *  - EROFS
+ *  - ENOMEM
+ */
+static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
+                               struct ext4_ext_path *path)
+{
+       if (path->p_bh) {
+               /* path points to block */
+               return ext4_journal_get_write_access(handle, path->p_bh);
+       }
+       /* path points to leaf/index in inode body */
+       /* we use in-core data, no need to protect them */
+       return 0;
+}
+
+/*
+ * could return:
+ *  - EROFS
+ *  - ENOMEM
+ *  - EIO
+ */
+static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
+                               struct ext4_ext_path *path)
+{
+       int err;
+       if (path->p_bh) {
+               /* path points to block */
+               err = ext4_journal_dirty_metadata(handle, path->p_bh);
+       } else {
+               /* path points to leaf/index in inode body */
+               err = ext4_mark_inode_dirty(handle, inode);
+       }
+       return err;
+}
+
+static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
+                             struct ext4_ext_path *path,
+                             ext4_fsblk_t block)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       ext4_fsblk_t bg_start;
+       ext4_grpblk_t colour;
+       int depth;
+
+       if (path) {
+               struct ext4_extent *ex;
+               depth = path->p_depth;
+
+               /* try to predict block placement */
+               if ((ex = path[depth].p_ext))
+                       return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));
+
+               /* it looks like index is empty;
+                * try to find starting block from index itself */
+               if (path[depth].p_bh)
+                       return path[depth].p_bh->b_blocknr;
+       }
+
+       /* OK. use inode's group */
+       bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
+               le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
+       colour = (current->pid % 16) *
+                       (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+       return bg_start + colour + block;
+}
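+
+/*
+ * Example of the colour heuristic above: with 32768 blocks per group,
+ * a task with pid % 16 == 5 gets colour 5 * (32768 / 16) = 10240,
+ * spreading concurrent writers across sixteen slices of the group.
+ */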
+
+static ext4_fsblk_t
+ext4_ext_new_block(handle_t *handle, struct inode *inode,
+                       struct ext4_ext_path *path,
+                       struct ext4_extent *ex, int *err)
+{
+       ext4_fsblk_t goal, newblock;
+
+       goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
+       newblock = ext4_new_block(handle, inode, goal, err);
+       return newblock;
+}
+
+static inline int ext4_ext_space_block(struct inode *inode)
+{
+       int size;
+
+       size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
+                       / sizeof(struct ext4_extent);
+#ifdef AGRESSIVE_TEST
+       if (size > 6)
+               size = 6;
+#endif
+       return size;
+}
+
+static inline int ext4_ext_space_block_idx(struct inode *inode)
+{
+       int size;
+
+       size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
+                       / sizeof(struct ext4_extent_idx);
+#ifdef AGRESSIVE_TEST
+       if (size > 5)
+               size = 5;
+#endif
+       return size;
+}
+
+static inline int ext4_ext_space_root(struct inode *inode)
+{
+       int size;
+
+       size = sizeof(EXT4_I(inode)->i_data);
+       size -= sizeof(struct ext4_extent_header);
+       size /= sizeof(struct ext4_extent);
+#ifdef AGRESSIVE_TEST
+       if (size > 3)
+               size = 3;
+#endif
+       return size;
+}
+
+static inline int ext4_ext_space_root_idx(struct inode *inode)
+{
+       int size;
+
+       size = sizeof(EXT4_I(inode)->i_data);
+       size -= sizeof(struct ext4_extent_header);
+       size /= sizeof(struct ext4_extent_idx);
+#ifdef AGRESSIVE_TEST
+       if (size > 4)
+               size = 4;
+#endif
+       return size;
+}
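+
+/*
+ * Capacity example, assuming a 4KB block size and the 12-byte on-disk
+ * header, extent and index records: a full tree block holds
+ * (4096 - 12) / 12 = 340 extents or indexes, while the 60-byte i_data
+ * root holds (60 - 12) / 12 = 4 extents (or 4 indexes).
+ */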
+
+#ifdef EXT_DEBUG
+static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
+{
+       int k, l = path->p_depth;
+
+       ext_debug("path:");
+       for (k = 0; k <= l; k++, path++) {
+               if (path->p_idx) {
+                 ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
+                           idx_pblock(path->p_idx));
+               } else if (path->p_ext) {
+                       ext_debug("  %d:%d:%llu ",
+                                 le32_to_cpu(path->p_ext->ee_block),
+                                 le16_to_cpu(path->p_ext->ee_len),
+                                 ext_pblock(path->p_ext));
+               } else
+                       ext_debug("  []");
+       }
+       ext_debug("\n");
+}
+
+static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
+{
+       int depth = ext_depth(inode);
+       struct ext4_extent_header *eh;
+       struct ext4_extent *ex;
+       int i;
+
+       if (!path)
+               return;
+
+       eh = path[depth].p_hdr;
+       ex = EXT_FIRST_EXTENT(eh);
+
+       for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
+               ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
+                         le16_to_cpu(ex->ee_len), ext_pblock(ex));
+       }
+       ext_debug("\n");
+}
+#else
+#define ext4_ext_show_path(inode,path)
+#define ext4_ext_show_leaf(inode,path)
+#endif
+
+static void ext4_ext_drop_refs(struct ext4_ext_path *path)
+{
+       int depth = path->p_depth;
+       int i;
+
+       for (i = 0; i <= depth; i++, path++)
+               if (path->p_bh) {
+                       brelse(path->p_bh);
+                       path->p_bh = NULL;
+               }
+}
+
+/*
+ * ext4_ext_binsearch_idx:
+ * binary search for the closest index of the given block
+ */
+static void
+ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
+{
+       struct ext4_extent_header *eh = path->p_hdr;
+       struct ext4_extent_idx *r, *l, *m;
+
+       BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
+       BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
+       BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);
+
+       ext_debug("binsearch for %d(idx):  ", block);
+
+       l = EXT_FIRST_INDEX(eh) + 1;
+       r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
+       while (l <= r) {
+               m = l + (r - l) / 2;
+               if (block < le32_to_cpu(m->ei_block))
+                       r = m - 1;
+               else
+                       l = m + 1;
+               ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
+                               m, m->ei_block, r, r->ei_block);
+       }
+
+       path->p_idx = l - 1;
+       ext_debug("  -> %d->%llu ", le32_to_cpu(path->p_idx->ei_block),
+                 idx_pblock(path->p_idx));
+
+#ifdef CHECK_BINSEARCH
+       {
+               struct ext4_extent_idx *chix, *ix;
+               int k;
+
+               chix = ix = EXT_FIRST_INDEX(eh);
+               for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
+                 if (k != 0 &&
+                     le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
+                               printk("k=%d, ix=0x%p, first=0x%p\n", k,
+                                       ix, EXT_FIRST_INDEX(eh));
+                               printk("%u <= %u\n",
+                                      le32_to_cpu(ix->ei_block),
+                                      le32_to_cpu(ix[-1].ei_block));
+                       }
+                       BUG_ON(k && le32_to_cpu(ix->ei_block)
+                                          <= le32_to_cpu(ix[-1].ei_block));
+                       if (block < le32_to_cpu(ix->ei_block))
+                               break;
+                       chix = ix;
+               }
+               BUG_ON(chix != path->p_idx);
+       }
+#endif
+
+}
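+
+/*
+ * Binary-search example: with index entries for logical blocks
+ * {5, 20, 40}, a lookup for block 25 starts with l at the entry for 20
+ * and converges so that path->p_idx ends up pointing at that entry,
+ * i.e. the last index whose ei_block is <= the target block.
+ */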
+
+/*
+ * ext4_ext_binsearch:
+ * binary search for closest extent of the given block
+ */
+static void
+ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
+{
+       struct ext4_extent_header *eh = path->p_hdr;
+       struct ext4_extent *r, *l, *m;
+
+       BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
+       BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
+
+       if (eh->eh_entries == 0) {
+               /*
+                * this leaf is empty:
+                * we get such a leaf in split/add case
+                */
+               return;
+       }
+
+       ext_debug("binsearch for %d:  ", block);
+
+       l = EXT_FIRST_EXTENT(eh) + 1;
+       r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
+
+       while (l <= r) {
+               m = l + (r - l) / 2;
+               if (block < le32_to_cpu(m->ee_block))
+                       r = m - 1;
+               else
+                       l = m + 1;
+               ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
+                               m, m->ee_block, r, r->ee_block);
+       }
+
+       path->p_ext = l - 1;
+       ext_debug("  -> %d:%llu:%d ",
+                       le32_to_cpu(path->p_ext->ee_block),
+                       ext_pblock(path->p_ext),
+                       le16_to_cpu(path->p_ext->ee_len));
+
+#ifdef CHECK_BINSEARCH
+       {
+               struct ext4_extent *chex, *ex;
+               int k;
+
+               chex = ex = EXT_FIRST_EXTENT(eh);
+               for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
+                       BUG_ON(k && le32_to_cpu(ex->ee_block)
+                                         <= le32_to_cpu(ex[-1].ee_block));
+                       if (block < le32_to_cpu(ex->ee_block))
+                               break;
+                       chex = ex;
+               }
+               BUG_ON(chex != path->p_ext);
+       }
+#endif
+
+}
+
+int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
+{
+       struct ext4_extent_header *eh;
+
+       eh = ext_inode_hdr(inode);
+       eh->eh_depth = 0;
+       eh->eh_entries = 0;
+       eh->eh_magic = EXT4_EXT_MAGIC;
+       eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
+       ext4_mark_inode_dirty(handle, inode);
+       ext4_ext_invalidate_cache(inode);
+       return 0;
+}
+
+struct ext4_ext_path *
+ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
+{
+       struct ext4_extent_header *eh;
+       struct buffer_head *bh;
+       short int depth, i, ppos = 0, alloc = 0;
+
+       eh = ext_inode_hdr(inode);
+       BUG_ON(eh == NULL);
+       if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+               return ERR_PTR(-EIO);
+
+       i = depth = ext_depth(inode);
+
+       /* account possible depth increase */
+       if (!path) {
+               path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
+                               GFP_NOFS);
+               if (!path)
+                       return ERR_PTR(-ENOMEM);
+               alloc = 1;
+       }
+       memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
+       path[0].p_hdr = eh;
+
+       /* walk through the tree */
+       while (i) {
+               ext_debug("depth %d: num %d, max %d\n",
+                         ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+               ext4_ext_binsearch_idx(inode, path + ppos, block);
+               path[ppos].p_block = idx_pblock(path[ppos].p_idx);
+               path[ppos].p_depth = i;
+               path[ppos].p_ext = NULL;
+
+               bh = sb_bread(inode->i_sb, path[ppos].p_block);
+               if (!bh)
+                       goto err;
+
+               eh = ext_block_hdr(bh);
+               ppos++;
+               BUG_ON(ppos > depth);
+               path[ppos].p_bh = bh;
+               path[ppos].p_hdr = eh;
+               i--;
+
+               if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+                       goto err;
+       }
+
+       path[ppos].p_depth = i;
+       path[ppos].p_hdr = eh;
+       path[ppos].p_ext = NULL;
+       path[ppos].p_idx = NULL;
+
+       if (ext4_ext_check_header(__FUNCTION__, inode, eh))
+               goto err;
+
+       /* find extent */
+       ext4_ext_binsearch(inode, path + ppos, block);
+
+       ext4_ext_show_path(inode, path);
+
+       return path;
+
+err:
+       ext4_ext_drop_refs(path);
+       if (alloc)
+               kfree(path);
+       return ERR_PTR(-EIO);
+}
+
+/*
+ * ext4_ext_insert_index:
+ * insert new index [@logical;@ptr] into the block at @curp;
+ * check where to insert: before @curp or after @curp
+ */
+static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
+                               struct ext4_ext_path *curp,
+                               int logical, ext4_fsblk_t ptr)
+{
+       struct ext4_extent_idx *ix;
+       int len, err;
+
+       if ((err = ext4_ext_get_access(handle, inode, curp)))
+               return err;
+
+       BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
+       len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
+       if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
+               /* insert after */
+               if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
+                       len = (len - 1) * sizeof(struct ext4_extent_idx);
+                       len = len < 0 ? 0 : len;
+                       ext_debug("insert new index %d after: %d. "
+                                       "move %d from 0x%p to 0x%p\n",
+                                       logical, ptr, len,
+                                       (curp->p_idx + 1), (curp->p_idx + 2));
+                       memmove(curp->p_idx + 2, curp->p_idx + 1, len);
+               }
+               ix = curp->p_idx + 1;
+       } else {
+               /* insert before */
+               len = len * sizeof(struct ext4_extent_idx);
+               len = len < 0 ? 0 : len;
+               ext_debug("insert new index %d before: %d. "
+                               "move %d from 0x%p to 0x%p\n",
+                               logical, ptr, len,
+                               curp->p_idx, (curp->p_idx + 1));
+               memmove(curp->p_idx + 1, curp->p_idx, len);
+               ix = curp->p_idx;
+       }
+
+       ix->ei_block = cpu_to_le32(logical);
+       ext4_idx_store_pblock(ix, ptr);
+       curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
+
+       BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
+                            > le16_to_cpu(curp->p_hdr->eh_max));
+       BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));
+
+       err = ext4_ext_dirty(handle, inode, curp);
+       ext4_std_error(inode->i_sb, err);
+
+       return err;
+}
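+
+/*
+ * Illustrative picture (not from the original source): with indexes
+ * [i0 i1 i2] and curp->p_idx == i1, inserting after i1 memmove()s i2
+ * one slot to the right and writes the new entry into the freed slot:
+ * [i0 i1 i2] -> [i0 i1 new i2].
+ */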
+
+/*
+ * ext4_ext_split:
+ * inserts new subtree into the path, using free index entry
+ * at depth @at:
+ * - allocates all needed blocks (new leaf and all intermediate index blocks)
+ * - makes decision where to split
+ * - moves remaining extents and index entries (right to the split point)
+ *   into the newly allocated blocks
+ * - initializes subtree
+ */
+static int ext4_ext_split(handle_t *handle, struct inode *inode,
+                               struct ext4_ext_path *path,
+                               struct ext4_extent *newext, int at)
+{
+       struct buffer_head *bh = NULL;
+       int depth = ext_depth(inode);
+       struct ext4_extent_header *neh;
+       struct ext4_extent_idx *fidx;
+       struct ext4_extent *ex;
+       int i = at, k, m, a;
+       ext4_fsblk_t newblock, oldblock;
+       __le32 border;
+       ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
+       int err = 0;
+
+       /* make decision: where to split? */
+       /* FIXME: now decision is simplest: at current extent */
+
+       /* if current leaf will be split, then we should use
+        * border from split point */
+       BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
+       if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
+               border = path[depth].p_ext[1].ee_block;
+               ext_debug("leaf will be split."
+                               " next leaf starts at %d\n",
+                                 le32_to_cpu(border));
+       } else {
+               border = newext->ee_block;
+               ext_debug("leaf will be added."
+                               " next leaf starts at %d\n",
+                               le32_to_cpu(border));
+       }
+
+       /*
+        * If an error occurs, we break processing
+        * and mark the filesystem read-only. The index won't
+        * be inserted and the tree will remain in a consistent
+        * state. The next mount will repair buffers too.
+        */
+
+       /*
+        * Get an array to track all allocated blocks.
+        * We need it to free those blocks on the
+        * error paths.
+        */
+       ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
+       if (!ablocks)
+               return -ENOMEM;
+       memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);
+
+       /* allocate all needed blocks */
+       ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
+       for (a = 0; a < depth - at; a++) {
+               newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
+               if (newblock == 0)
+                       goto cleanup;
+               ablocks[a] = newblock;
+       }
+
+       /* initialize new leaf */
+       newblock = ablocks[--a];
+       BUG_ON(newblock == 0);
+       bh = sb_getblk(inode->i_sb, newblock);
+       if (!bh) {
+               err = -EIO;
+               goto cleanup;
+       }
+       lock_buffer(bh);
+
+       if ((err = ext4_journal_get_create_access(handle, bh)))
+               goto cleanup;
+
+       neh = ext_block_hdr(bh);
+       neh->eh_entries = 0;
+       neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
+       neh->eh_magic = EXT4_EXT_MAGIC;
+       neh->eh_depth = 0;
+       ex = EXT_FIRST_EXTENT(neh);
+
+       /* move remainder of path[depth] to the new leaf */
+       BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
+       /* start copy from next extent */
+       /* TODO: we could do it by single memmove */
+       m = 0;
+       path[depth].p_ext++;
+       while (path[depth].p_ext <=
+                       EXT_MAX_EXTENT(path[depth].p_hdr)) {
+               ext_debug("move %d:%llu:%d in new leaf %llu\n",
+                               le32_to_cpu(path[depth].p_ext->ee_block),
+                               ext_pblock(path[depth].p_ext),
+                               le16_to_cpu(path[depth].p_ext->ee_len),
+                               newblock);
+               /*memmove(ex++, path[depth].p_ext++,
+                               sizeof(struct ext4_extent));
+               neh->eh_entries++;*/
+               path[depth].p_ext++;
+               m++;
+       }
+       if (m) {
+               memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
+               neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
+       }
+
+       set_buffer_uptodate(bh);
+       unlock_buffer(bh);
+
+       if ((err = ext4_journal_dirty_metadata(handle, bh)))
+               goto cleanup;
+       brelse(bh);
+       bh = NULL;
+
+       /* correct old leaf */
+       if (m) {
+               if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+                       goto cleanup;
+               path[depth].p_hdr->eh_entries =
+                    cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
+               if ((err = ext4_ext_dirty(handle, inode, path + depth)))
+                       goto cleanup;
+
+       }
+
+       /* create intermediate indexes */
+       k = depth - at - 1;
+       BUG_ON(k < 0);
+       if (k)
+               ext_debug("create %d intermediate indices\n", k);
+       /* insert new index into current index block */
+       /* current depth stored in i var */
+       i = depth - 1;
+       while (k--) {
+               oldblock = newblock;
+               newblock = ablocks[--a];
+               bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
+               if (!bh) {
+                       err = -EIO;
+                       goto cleanup;
+               }
+               lock_buffer(bh);
+
+               if ((err = ext4_journal_get_create_access(handle, bh)))
+                       goto cleanup;
+
+               neh = ext_block_hdr(bh);
+               neh->eh_entries = cpu_to_le16(1);
+               neh->eh_magic = EXT4_EXT_MAGIC;
+               neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
+               neh->eh_depth = cpu_to_le16(depth - i);
+               fidx = EXT_FIRST_INDEX(neh);
+               fidx->ei_block = border;
+               ext4_idx_store_pblock(fidx, oldblock);
+
+               ext_debug("int.index at %d (block %llu): %lu -> %llu\n", i,
+                               newblock, (unsigned long) le32_to_cpu(border),
+                               oldblock);
+               /* copy indexes */
+               m = 0;
+               path[i].p_idx++;
+
+               ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
+                               EXT_MAX_INDEX(path[i].p_hdr));
+               BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
+                               EXT_LAST_INDEX(path[i].p_hdr));
+               while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
+                       ext_debug("%d: move %d:%d in new index %llu\n", i,
+                                       le32_to_cpu(path[i].p_idx->ei_block),
+                                       idx_pblock(path[i].p_idx),
+                                       newblock);
+                       /*memmove(++fidx, path[i].p_idx++,
+                                       sizeof(struct ext4_extent_idx));
+                       neh->eh_entries++;
+                       BUG_ON(neh->eh_entries > neh->eh_max);*/
+                       path[i].p_idx++;
+                       m++;
+               }
+               if (m) {
+                       memmove(++fidx, path[i].p_idx - m,
+                               sizeof(struct ext4_extent_idx) * m);
+                       neh->eh_entries =
+                               cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
+               }
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+
+               if ((err = ext4_journal_dirty_metadata(handle, bh)))
+                       goto cleanup;
+               brelse(bh);
+               bh = NULL;
+
+               /* correct old index */
+               if (m) {
+                       err = ext4_ext_get_access(handle, inode, path + i);
+                       if (err)
+                               goto cleanup;
+                       path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
+                       err = ext4_ext_dirty(handle, inode, path + i);
+                       if (err)
+                               goto cleanup;
+               }
+
+               i--;
+       }
+
+       /* insert new index */
+       if (err)
+               goto cleanup;
+
+       err = ext4_ext_insert_index(handle, inode, path + at,
+                                   le32_to_cpu(border), newblock);
+
+cleanup:
+       if (bh) {
+               if (buffer_locked(bh))
+                       unlock_buffer(bh);
+               brelse(bh);
+       }
+
+       if (err) {
+               /* free all allocated blocks in error case */
+               for (i = 0; i < depth; i++) {
+                       if (!ablocks[i])
+                               continue;
+                       ext4_free_blocks(handle, inode, ablocks[i], 1);
+               }
+       }
+       kfree(ablocks);
+
+       return err;
+}
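+
+/*
+ * Illustrative picture of a split (not from the original source): with
+ * a full leaf and a free slot in an index block above, the extents to
+ * the right of the split point move into a freshly allocated leaf:
+ *
+ *        [idx] -> [e1 e2 e3 e4]   becomes   [idx idx'] -> [e1 e2] [e3 e4]
+ *
+ * where idx' takes @border as its logical start.
+ */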
+
+/*
+ * ext4_ext_grow_indepth:
+ * implements tree growing procedure:
+ * - allocates new block
+ * - moves top-level data (index block or leaf) into the new block
+ * - initializes new top-level, creating index that points to the
+ *   just created block
+ */
+static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+                                       struct ext4_ext_path *path,
+                                       struct ext4_extent *newext)
+{
+       struct ext4_ext_path *curp = path;
+       struct ext4_extent_header *neh;
+       struct ext4_extent_idx *fidx;
+       struct buffer_head *bh;
+       ext4_fsblk_t newblock;
+       int err = 0;
+
+       newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
+       if (newblock == 0)
+               return err;
+
+       bh = sb_getblk(inode->i_sb, newblock);
+       if (!bh) {
+               err = -EIO;
+               ext4_std_error(inode->i_sb, err);
+               return err;
+       }
+       lock_buffer(bh);
+
+       if ((err = ext4_journal_get_create_access(handle, bh))) {
+               unlock_buffer(bh);
+               goto out;
+       }
+
+       /* move top-level index/leaf into new block */
+       memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));
+
+       /* set size of new block */
+       neh = ext_block_hdr(bh);
+       /* old root could have indexes or leaves
+        * so calculate eh_max the right way */
+       if (ext_depth(inode))
+               neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
+       else
+               neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
+       neh->eh_magic = EXT4_EXT_MAGIC;
+       set_buffer_uptodate(bh);
+       unlock_buffer(bh);
+
+       if ((err = ext4_journal_dirty_metadata(handle, bh)))
+               goto out;
+
+       /* create index in new top-level index: num,max,pointer */
+       if ((err = ext4_ext_get_access(handle, inode, curp)))
+               goto out;
+
+       curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
+       curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
+       curp->p_hdr->eh_entries = cpu_to_le16(1);
+       curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
+       /* FIXME: it works, but actually path[0] can be index */
+       curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
+       ext4_idx_store_pblock(curp->p_idx, newblock);
+
+       neh = ext_inode_hdr(inode);
+       fidx = EXT_FIRST_INDEX(neh);
+       ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
+                 le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
+                 le32_to_cpu(fidx->ei_block), idx_pblock(fidx));
+
+       neh->eh_depth = cpu_to_le16(path->p_depth + 1);
+       err = ext4_ext_dirty(handle, inode, curp);
+out:
+       brelse(bh);
+
+       return err;
+}
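+
+/*
+ * Illustrative picture (not from the original source): growing in
+ * depth copies the root's entries into a fresh block and leaves a
+ * single index behind in the inode:
+ *
+ *        [root: e1..e4]  ->  [root: idx] -> [new block: e1..e4]
+ */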
+
+/*
+ * ext4_ext_create_new_leaf:
+ * finds an empty index slot and adds a new leaf.
+ * if no free index is found, it requests growing the tree in depth.
+ */
+static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
+                                       struct ext4_ext_path *path,
+                                       struct ext4_extent *newext)
+{
+       struct ext4_ext_path *curp;
+       int depth, i, err = 0;
+
+repeat:
+       i = depth = ext_depth(inode);
+
+       /* walk up the tree and look for a free index entry */
+       curp = path + depth;
+       while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
+               i--;
+               curp--;
+       }
+
+       /* we use an already allocated block for the index block,
+        * so subsequent data blocks should be contiguous */
+       if (EXT_HAS_FREE_INDEX(curp)) {
+               /* if we found an index with a free entry, then use that
+                * entry: create all needed subtree and add new leaf */
+               err = ext4_ext_split(handle, inode, path, newext, i);
+
+               /* refill path */
+               ext4_ext_drop_refs(path);
+               path = ext4_ext_find_extent(inode,
+                                           le32_to_cpu(newext->ee_block),
+                                           path);
+               if (IS_ERR(path))
+                       err = PTR_ERR(path);
+       } else {
+               /* tree is full, time to grow in depth */
+               err = ext4_ext_grow_indepth(handle, inode, path, newext);
+               if (err)
+                       goto out;
+
+               /* refill path */
+               ext4_ext_drop_refs(path);
+               path = ext4_ext_find_extent(inode,
+                                           le32_to_cpu(newext->ee_block),
+                                           path);
+               if (IS_ERR(path)) {
+                       err = PTR_ERR(path);
+                       goto out;
+               }
+
+               /*
+                * only first (depth 0 -> 1) produces free space;
+                * in all other cases we have to split the grown tree
+                */
+               depth = ext_depth(inode);
+               if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
+                       /* now we need to split */
+                       goto repeat;
+               }
+       }
+
+out:
+       return err;
+}
+
+/*
+ * ext4_ext_next_allocated_block:
+ * returns the allocated block in the subsequent extent or EXT_MAX_BLOCK.
+ * NOTE: it treats the block number from an index entry as an
+ * allocated block. Thus, index entries have to be consistent
+ * with leaves.
+ */
+static unsigned long
+ext4_ext_next_allocated_block(struct ext4_ext_path *path)
+{
+       int depth;
+
+       BUG_ON(path == NULL);
+       depth = path->p_depth;
+
+       if (depth == 0 && path->p_ext == NULL)
+               return EXT_MAX_BLOCK;
+
+       while (depth >= 0) {
+               if (depth == path->p_depth) {
+                       /* leaf */
+                       if (path[depth].p_ext !=
+                                       EXT_LAST_EXTENT(path[depth].p_hdr))
+                               return le32_to_cpu(path[depth].p_ext[1].ee_block);
+               } else {
+                       /* index */
+                       if (path[depth].p_idx !=
+                                       EXT_LAST_INDEX(path[depth].p_hdr))
+                               return le32_to_cpu(path[depth].p_idx[1].ei_block);
+               }
+               depth--;
+       }
+
+       return EXT_MAX_BLOCK;
+}
+
+/*
+ * ext4_ext_next_leaf_block:
+ * returns first allocated block from next leaf or EXT_MAX_BLOCK
+ */
+static unsigned ext4_ext_next_leaf_block(struct inode *inode,
+                                       struct ext4_ext_path *path)
+{
+       int depth;
+
+       BUG_ON(path == NULL);
+       depth = path->p_depth;
+
+       /* zero-tree has no leaf blocks at all */
+       if (depth == 0)
+               return EXT_MAX_BLOCK;
+
+       /* go to index block */
+       depth--;
+
+       while (depth >= 0) {
+               if (path[depth].p_idx !=
+                               EXT_LAST_INDEX(path[depth].p_hdr))
+                       return le32_to_cpu(path[depth].p_idx[1].ei_block);
+               depth--;
+       }
+
+       return EXT_MAX_BLOCK;
+}
+
+/*
+ * ext4_ext_correct_indexes:
+ * if leaf gets modified and modified extent is first in the leaf,
+ * then we have to correct all indexes above.
+ * TODO: do we need to correct tree in all cases?
+ */
+int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
+                               struct ext4_ext_path *path)
+{
+       struct ext4_extent_header *eh;
+       int depth = ext_depth(inode);
+       struct ext4_extent *ex;
+       __le32 border;
+       int k, err = 0;
+
+       eh = path[depth].p_hdr;
+       ex = path[depth].p_ext;
+       BUG_ON(ex == NULL);
+       BUG_ON(eh == NULL);
+
+       if (depth == 0) {
+               /* there is no tree at all */
+               return 0;
+       }
+
+       if (ex != EXT_FIRST_EXTENT(eh)) {
+               /* we correct tree if first leaf got modified only */
+               return 0;
+       }
+
+       /*
+        * TODO: we need correction if border is smaller than current one
+        */
+       k = depth - 1;
+       border = path[depth].p_ext->ee_block;
+       if ((err = ext4_ext_get_access(handle, inode, path + k)))
+               return err;
+       path[k].p_idx->ei_block = border;
+       if ((err = ext4_ext_dirty(handle, inode, path + k)))
+               return err;
+
+       while (k--) {
+               /* change all left-side indexes */
+               if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
+                       break;
+               if ((err = ext4_ext_get_access(handle, inode, path + k)))
+                       break;
+               path[k].p_idx->ei_block = border;
+               if ((err = ext4_ext_dirty(handle, inode, path + k)))
+                       break;
+       }
+
+       return err;
+}
+
+static inline int
+ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
+                               struct ext4_extent *ex2)
+{
+       if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len) !=
+                       le32_to_cpu(ex2->ee_block))
+               return 0;
+
+       /*
+        * To allow future support for preallocated extents to be added
+        * as an RO_COMPAT feature, refuse to merge two extents if
+        * this can result in the top bit of ee_len being set.
+        */
+       if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
+               return 0;
+#ifdef AGRESSIVE_TEST
+       if (le16_to_cpu(ex1->ee_len) >= 4)
+               return 0;
+#endif
+
+       if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
+               return 1;
+       return 0;
+}
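+
+/*
+ * Merge example (illustrative): logical [100, len 8] at physical 500
+ * and [108, len 4] at physical 508 are contiguous in both spaces and
+ * within the length cap, so they merge into [100, len 12] at 500.
+ */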
+
+/*
+ * ext4_ext_insert_extent:
+ * tries to merge the requested extent into an existing extent or
+ * inserts the requested extent as a new one into the tree,
+ * creating a new leaf in the no-space case.
+ */
+int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
+                               struct ext4_ext_path *path,
+                               struct ext4_extent *newext)
+{
+       struct ext4_extent_header *eh;
+       struct ext4_extent *ex, *fex;
+       struct ext4_extent *nearex; /* nearest extent */
+       struct ext4_ext_path *npath = NULL;
+       int depth, len, err, next;
+
+       BUG_ON(newext->ee_len == 0);
+       depth = ext_depth(inode);
+       ex = path[depth].p_ext;
+       BUG_ON(path[depth].p_hdr == NULL);
+
+       /* try to insert block into found extent and return */
+       if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
+               ext_debug("append %d block to %d:%d (from %llu)\n",
+                               le16_to_cpu(newext->ee_len),
+                               le32_to_cpu(ex->ee_block),
+                               le16_to_cpu(ex->ee_len), ext_pblock(ex));
+               if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+                       return err;
+               ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
+                                        + le16_to_cpu(newext->ee_len));
+               eh = path[depth].p_hdr;
+               nearex = ex;
+               goto merge;
+       }
+
+repeat:
+       depth = ext_depth(inode);
+       eh = path[depth].p_hdr;
+       if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
+               goto has_space;
+
+       /* probably next leaf has space for us? */
+       fex = EXT_LAST_EXTENT(eh);
+       next = ext4_ext_next_leaf_block(inode, path);
+       if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
+           && next != EXT_MAX_BLOCK) {
+               ext_debug("next leaf block - %d\n", next);
+               BUG_ON(npath != NULL);
+               npath = ext4_ext_find_extent(inode, next, NULL);
+               if (IS_ERR(npath))
+                       return PTR_ERR(npath);
+               BUG_ON(npath->p_depth != path->p_depth);
+               eh = npath[depth].p_hdr;
+               if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
+                       ext_debug("next leaf isnt full(%d)\n",
+                                 le16_to_cpu(eh->eh_entries));
+                       path = npath;
+                       goto repeat;
+               }
+               ext_debug("next leaf has no free space(%d,%d)\n",
+                         le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
+       }
+
+       /*
+        * There is no free space in the found leaf.
+        * We're gonna add a new leaf in the tree.
+        */
+       err = ext4_ext_create_new_leaf(handle, inode, path, newext);
+       if (err)
+               goto cleanup;
+       depth = ext_depth(inode);
+       eh = path[depth].p_hdr;
+
+has_space:
+       nearex = path[depth].p_ext;
+
+       if ((err = ext4_ext_get_access(handle, inode, path + depth)))
+               goto cleanup;
+
+       if (!nearex) {
+               /* there is no extent in this leaf, create first one */
+               ext_debug("first extent in the leaf: %d:%llu:%d\n",
+                               le32_to_cpu(newext->ee_block),
+                               ext_pblock(newext),
+                               le16_to_cpu(newext->ee_len));
+               path[depth].p_ext = EXT_FIRST_EXTENT(eh);
+       } else if (le32_to_cpu(newext->ee_block)
+                          > le32_to_cpu(nearex->ee_block)) {
+/*             BUG_ON(newext->ee_block == nearex->ee_block); */
+               if (nearex != EXT_LAST_EXTENT(eh)) {
+                       len = EXT_MAX_EXTENT(eh) - nearex;
+                       len = (len - 1) * sizeof(struct ext4_extent);
+                       len = len < 0 ? 0 : len;
+                       ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
+                                       "move %d from 0x%p to 0x%p\n",
+                                       le32_to_cpu(newext->ee_block),
+                                       ext_pblock(newext),
+                                       le16_to_cpu(newext->ee_len),
+                                       nearex, len, nearex + 1, nearex + 2);
+                       memmove(nearex + 2, nearex + 1, len);
+               }
+               path[depth].p_ext = nearex + 1;
+       } else {
+               BUG_ON(newext->ee_block == nearex->ee_block);
+               len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
+               len = len < 0 ? 0 : len;
+               ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
+                               "move %d from 0x%p to 0x%p\n",
+                               le32_to_cpu(newext->ee_block),
+                               ext_pblock(newext),
+                               le16_to_cpu(newext->ee_len),
+                               nearex, len, nearex + 1, nearex + 2);
+               memmove(nearex + 1, nearex, len);
+               path[depth].p_ext = nearex;
+       }
+
+       eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
+       nearex = path[depth].p_ext;
+       nearex->ee_block = newext->ee_block;
+       nearex->ee_start = newext->ee_start;
+       nearex->ee_start_hi = newext->ee_start_hi;
+       nearex->ee_len = newext->ee_len;
+
+merge:
+       /* try to merge extents to the right */
+       while (nearex < EXT_LAST_EXTENT(eh)) {
+               if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
+                       break;
+               /* merge with next extent! */
+               nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
+                                            + le16_to_cpu(nearex[1].ee_len));
+               if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
+                       len = (EXT_LAST_EXTENT(eh) - nearex - 1)
+                                       * sizeof(struct ext4_extent);
+                       memmove(nearex + 1, nearex + 2, len);
+               }
+               eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
+               BUG_ON(eh->eh_entries == 0);
+       }
+
+       /* try to merge extents to the left */
+
+       /* time to correct all indexes above */
+       err = ext4_ext_correct_indexes(handle, inode, path);
+       if (err)
+               goto cleanup;
+
+       err = ext4_ext_dirty(handle, inode, path + depth);
+
+cleanup:
+       if (npath) {
+               ext4_ext_drop_refs(npath);
+               kfree(npath);
+       }
+       ext4_ext_tree_changed(inode);
+       ext4_ext_invalidate_cache(inode);
+       return err;
+}
+
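+/*
+ * ext4_ext_walk_space:
+ * iterates over the range [@block, @block + @num) and reports each
+ * extent or gap to @func as an ext4_ext_cache; the callback may
+ * return a negative errno, EXT_BREAK to stop, or EXT_REPEAT to
+ * revisit the same range.
+ */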
+int ext4_ext_walk_space(struct inode *inode, unsigned long block,
+                       unsigned long num, ext_prepare_callback func,
+                       void *cbdata)
+{
+       struct ext4_ext_path *path = NULL;
+       struct ext4_ext_cache cbex;
+       struct ext4_extent *ex;
+       unsigned long next, start = 0, end = 0;
+       unsigned long last = block + num;
+       int depth, exists, err = 0;
+
+       BUG_ON(func == NULL);
+       BUG_ON(inode == NULL);
+
+       while (block < last && block != EXT_MAX_BLOCK) {
+               num = last - block;
+               /* find extent for this block */
+               path = ext4_ext_find_extent(inode, block, path);
+               if (IS_ERR(path)) {
+                       err = PTR_ERR(path);
+                       path = NULL;
+                       break;
+               }
+
+               depth = ext_depth(inode);
+               BUG_ON(path[depth].p_hdr == NULL);
+               ex = path[depth].p_ext;
+               next = ext4_ext_next_allocated_block(path);
+
+               exists = 0;
+               if (!ex) {
+                       /* there is no extent yet, so try to allocate
+                        * all requested space */
+                       start = block;
+                       end = block + num;
+               } else if (le32_to_cpu(ex->ee_block) > block) {
+                       /* need to allocate space before found extent */
+                       start = block;
+                       end = le32_to_cpu(ex->ee_block);
+                       if (block + num < end)
+                               end = block + num;
+               } else if (block >=
+                            le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
+                       /* need to allocate space after found extent */
+                       start = block;
+                       end = block + num;
+                       if (end >= next)
+                               end = next;
+               } else if (block >= le32_to_cpu(ex->ee_block)) {
+                       /*
+                        * some part of requested space is covered
+                        * by found extent
+                        */
+                       start = block;
+                       end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
+                       if (block + num < end)
+                               end = block + num;
+                       exists = 1;
+               } else {
+                       BUG();
+               }
+               BUG_ON(end <= start);
+
+               if (!exists) {
+                       cbex.ec_block = start;
+                       cbex.ec_len = end - start;
+                       cbex.ec_start = 0;
+                       cbex.ec_type = EXT4_EXT_CACHE_GAP;
+               } else {
+                       cbex.ec_block = le32_to_cpu(ex->ee_block);
+                       cbex.ec_len = le16_to_cpu(ex->ee_len);
+                       cbex.ec_start = ext_pblock(ex);
+                       cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
+               }
+
+               BUG_ON(cbex.ec_len == 0);
+               err = func(inode, path, &cbex, cbdata);
+               ext4_ext_drop_refs(path);
+
+               if (err < 0)
+                       break;
+               if (err == EXT_REPEAT)
+                       continue;
+               else if (err == EXT_BREAK) {
+                       err = 0;
+                       break;
+               }
+
+               if (ext_depth(inode) != depth) {
+                       /* depth was changed. we have to realloc path */
+                       kfree(path);
+                       path = NULL;
+               }
+
+               block = cbex.ec_block + cbex.ec_len;
+       }
+
+       if (path) {
+               ext4_ext_drop_refs(path);
+               kfree(path);
+       }
+
+       return err;
+}
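+
+/*
+ * Callback sketch (hypothetical, follows the conventions above):
+ *
+ *        static int count_gaps(struct inode *inode, struct ext4_ext_path *path,
+ *                              struct ext4_ext_cache *cbex, void *cbdata)
+ *        {
+ *                if (cbex->ec_type == EXT4_EXT_CACHE_GAP)
+ *                        ++*(unsigned long *)cbdata;
+ *                return 0;        /* keep walking */
+ *        }
+ */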
+
+static inline void
+ext4_ext_put_in_cache(struct inode *inode, __u32 block,
+                       __u32 len, __u32 start, int type)
+{
+       struct ext4_ext_cache *cex;
+       BUG_ON(len == 0);
+       cex = &EXT4_I(inode)->i_cached_extent;
+       cex->ec_type = type;
+       cex->ec_block = block;
+       cex->ec_len = len;
+       cex->ec_start = start;
+}
+
+/*
+ * ext4_ext_put_gap_in_cache:
+ * calculate boundaries of the gap that the requested block fits into
+ * and cache this gap
+ */
+static inline void
+ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
+                               unsigned long block)
+{
+       int depth = ext_depth(inode);
+       unsigned long lblock, len;
+       struct ext4_extent *ex;
+
+       ex = path[depth].p_ext;
+       if (ex == NULL) {
+               /* there is no extent yet, so gap is [0;-] */
+               lblock = 0;
+               len = EXT_MAX_BLOCK;
+               ext_debug("cache gap(whole file):");
+       } else if (block < le32_to_cpu(ex->ee_block)) {
+               lblock = block;
+               len = le32_to_cpu(ex->ee_block) - block;
+               ext_debug("cache gap(before): %lu [%lu:%lu]",
+                               (unsigned long) block,
+                               (unsigned long) le32_to_cpu(ex->ee_block),
+                               (unsigned long) le16_to_cpu(ex->ee_len));
+       } else if (block >= le32_to_cpu(ex->ee_block)
+                           + le16_to_cpu(ex->ee_len)) {
+               lblock = le32_to_cpu(ex->ee_block)
+                        + le16_to_cpu(ex->ee_len);
+               len = ext4_ext_next_allocated_block(path);
+               ext_debug("cache gap(after): [%lu:%lu] %lu",
+                               (unsigned long) le32_to_cpu(ex->ee_block),
+                               (unsigned long) le16_to_cpu(ex->ee_len),
+                               (unsigned long) block);
+               BUG_ON(len == lblock);
+               len = len - lblock;
+       } else {
+               lblock = len = 0;
+               BUG();
+       }
+
+       ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
+       ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+}
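+
+/*
+ * Gap example (illustrative): with extents [0, len 8] and [100, len 4],
+ * a lookup for block 50 caches the gap [8, len 92], so repeated lookups
+ * in the hole avoid walking the tree.
+ */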
+
+static inline int
+ext4_ext_in_cache(struct inode *inode, unsigned long block,
+                       struct ext4_extent *ex)
+{
+       struct ext4_ext_cache *cex;
+
+       cex = &EXT4_I(inode)->i_cached_extent;
+
+       /* does the cache hold valid data? */
+       if (cex->ec_type == EXT4_EXT_CACHE_NO)
+               return EXT4_EXT_CACHE_NO;
+
+       BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
+                       cex->ec_type != EXT4_EXT_CACHE_EXTENT);
+       if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
+               ex->ee_block = cpu_to_le32(cex->ec_block);
+               ext4_ext_store_pblock(ex, cex->ec_start);
+               ex->ee_len = cpu_to_le16(cex->ec_len);
+               ext_debug("%lu cached by %lu:%lu:%llu\n",
+                               (unsigned long) block,
+                               (unsigned long) cex->ec_block,
+                               (unsigned long) cex->ec_len,
+                               cex->ec_start);
+               return cex->ec_type;
+       }
+
+       /* not in cache */
+       return EXT4_EXT_CACHE_NO;
+}
+
+/*
+ * ext4_ext_rm_idx:
+ * removes index from the index block.
+ * It's used in the truncate case only, thus all requests are for
+ * the last index in the block only.
+ */
+int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
+                       struct ext4_ext_path *path)
+{
+       struct buffer_head *bh;
+       int err;
+       ext4_fsblk_t leaf;
+
+       /* free index block */
+       path--;
+       leaf = idx_pblock(path->p_idx);
+       BUG_ON(path->p_hdr->eh_entries == 0);
+       if ((err = ext4_ext_get_access(handle, inode, path)))
+               return err;
+       path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
+       if ((err = ext4_ext_dirty(handle, inode, path)))
+               return err;
+       ext_debug("index is empty, remove it, free block %llu\n", leaf);
+       bh = sb_find_get_block(inode->i_sb, leaf);
+       ext4_forget(handle, 1, inode, bh, leaf);
+       ext4_free_blocks(handle, inode, leaf, 1);
+       return err;
+}
+
+/*
+ * ext4_ext_calc_credits_for_insert:
+ * This routine returns max. credits that the extent tree can consume.
+ * It should be OK for low-performance paths like ->writepage().
+ * To allow many writing processes to fit into a single transaction,
+ * the caller should calculate credits under truncate_mutex and
+ * pass the actual path.
+ */
+inline int ext4_ext_calc_credits_for_insert(struct inode *inode,
+                                               struct ext4_ext_path *path)
+{
+       int depth, needed;
+
+       if (path) {
+               /* probably there is space in leaf? */
+               depth = ext_depth(inode);
+               if (le16_to_cpu(path[depth].p_hdr->eh_entries)
+                               < le16_to_cpu(path[depth].p_hdr->eh_max))
+                       return 1;
+       }
+
+       /*
+        * given 32-bit logical block (4294967296 blocks), max. tree
+        * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
+        * Let's also add one more level for imbalance.
+        */
+       depth = 5;
+
+       /* allocation of new data block(s) */
+       needed = 2;
+
+       /*
+        * tree can be full, so it would need to grow in depth:
+        * allocation + old root + new root
+        */
+       needed += 2 + 1 + 1;
+
+       /*
+        * Index split can happen, we would need:
+        *    allocate intermediate indexes (bitmap + group)
+        *  + change two blocks at each level, but root (already included)
+        */
+       needed += (depth * 2) + (depth * 2);
+
+       /* any allocation modifies superblock */
+       needed += 1;
+
+       return needed;
+}
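+
+/*
+ * Worked arithmetic (illustrative, with the accumulation above):
+ * depth == 5 gives 2 (data) + 4 (grow) + 10 + 10 (split) + 1 (sb)
+ * == 27 credits for one insert without a usable path.
+ */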
+
+static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
+                               struct ext4_extent *ex,
+                               unsigned long from, unsigned long to)
+{
+       struct buffer_head *bh;
+       int i;
+
+#ifdef EXTENTS_STATS
+       {
+               struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+               unsigned short ee_len =  le16_to_cpu(ex->ee_len);
+               spin_lock(&sbi->s_ext_stats_lock);
+               sbi->s_ext_blocks += ee_len;
+               sbi->s_ext_extents++;
+               if (ee_len < sbi->s_ext_min)
+                       sbi->s_ext_min = ee_len;
+               if (ee_len > sbi->s_ext_max)
+                       sbi->s_ext_max = ee_len;
+               if (ext_depth(inode) > sbi->s_depth_max)
+                       sbi->s_depth_max = ext_depth(inode);
+               spin_unlock(&sbi->s_ext_stats_lock);
+       }
+#endif
+       if (from >= le32_to_cpu(ex->ee_block)
+           && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+               /* tail removal */
+               unsigned long num;
+               ext4_fsblk_t start;
+               num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
+               start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
+               ext_debug("free last %lu blocks starting %llu\n", num, start);
+               for (i = 0; i < num; i++) {
+                       bh = sb_find_get_block(inode->i_sb, start + i);
+                       ext4_forget(handle, 0, inode, bh, start + i);
+               }
+               ext4_free_blocks(handle, inode, start, num);
+       } else if (from == le32_to_cpu(ex->ee_block)
+                  && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
+               printk("strange request: removal %lu-%lu from %u:%u\n",
+                      from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+       } else {
+               printk("strange request: removal(2) %lu-%lu from %u:%u\n",
+                      from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
+       }
+       return 0;
+}
+
+static int
+ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
+               struct ext4_ext_path *path, unsigned long start)
+{
+       int err = 0, correct_index = 0;
+       int depth = ext_depth(inode), credits;
+       struct ext4_extent_header *eh;
+       unsigned a, b, block, num;
+       unsigned long ex_ee_block;
+       unsigned short ex_ee_len;
+       struct ext4_extent *ex;
+
+       ext_debug("truncate since %lu in leaf\n", start);
+       if (!path[depth].p_hdr)
+               path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
+       eh = path[depth].p_hdr;
+       BUG_ON(eh == NULL);
+       BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
+       BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
+
+       /* find where to start removing */
+       ex = EXT_LAST_EXTENT(eh);
+
+       ex_ee_block = le32_to_cpu(ex->ee_block);
+       ex_ee_len = le16_to_cpu(ex->ee_len);
+
+       while (ex >= EXT_FIRST_EXTENT(eh) &&
+                       ex_ee_block + ex_ee_len > start) {
+               ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
+               path[depth].p_ext = ex;
+
+               a = ex_ee_block > start ? ex_ee_block : start;
+               b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
+                       ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
+
+               ext_debug("  border %u:%u\n", a, b);
+
+               if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
+                       block = 0;
+                       num = 0;
+                       BUG();
+               } else if (a != ex_ee_block) {
+                       /* remove tail of the extent */
+                       block = ex_ee_block;
+                       num = a - block;
+               } else if (b != ex_ee_block + ex_ee_len - 1) {
+                       /* remove head of the extent */
+                       block = a;
+                       num = b - a;
+                       /* there is no "make a hole" API yet */
+                       BUG();
+               } else {
+                       /* remove whole extent: excellent! */
+                       block = ex_ee_block;
+                       num = 0;
+                       BUG_ON(a != ex_ee_block);
+                       BUG_ON(b != ex_ee_block + ex_ee_len - 1);
+               }
+
+               /* at present, extent can't cross block group: */
+               /* leaf + bitmap + group desc + sb + inode */
+               credits = 5;
+               if (ex == EXT_FIRST_EXTENT(eh)) {
+                       correct_index = 1;
+                       credits += (ext_depth(inode)) + 1;
+               }
+#ifdef CONFIG_QUOTA
+               credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
+#endif
+
+               handle = ext4_ext_journal_restart(handle, credits);
+               if (IS_ERR(handle)) {
+                       err = PTR_ERR(handle);
+                       goto out;
+               }
+
+               err = ext4_ext_get_access(handle, inode, path + depth);
+               if (err)
+                       goto out;
+
+               err = ext4_remove_blocks(handle, inode, ex, a, b);
+               if (err)
+                       goto out;
+
+               if (num == 0) {
+                       /* this extent is removed; mark slot entirely unused */
+                       ext4_ext_store_pblock(ex, 0);
+                       eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
+               }
+
+               ex->ee_block = cpu_to_le32(block);
+               ex->ee_len = cpu_to_le16(num);
+
+               err = ext4_ext_dirty(handle, inode, path + depth);
+               if (err)
+                       goto out;
+
+               ext_debug("new extent: %u:%u:%llu\n", block, num,
+                               ext_pblock(ex));
+               ex--;
+               ex_ee_block = le32_to_cpu(ex->ee_block);
+               ex_ee_len = le16_to_cpu(ex->ee_len);
+       }
+
+       if (correct_index && eh->eh_entries)
+               err = ext4_ext_correct_indexes(handle, inode, path);
+
+       /* if this leaf is free, then we should
+        * remove it from index block above */
+       if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
+               err = ext4_ext_rm_idx(handle, inode, path + depth);
+
+out:
+       return err;
+}
+
+/*
+ * ext4_ext_more_to_rm:
+ * returns 1 if current index has to be freed (even partial)
+ */
+static inline int
+ext4_ext_more_to_rm(struct ext4_ext_path *path)
+{
+       BUG_ON(path->p_idx == NULL);
+
+       if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
+               return 0;
+
+       /*
+        * if truncate on deeper level happened, it wasn't partial,
+        * so we have to consider current index for truncation
+        */
+       if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
+               return 0;
+       return 1;
+}
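+
+/*
+ * Sketch of the walk below (not from the original source): for a tree
+ * of depth 2, removal descends root -> rightmost index -> rightmost
+ * leaf, frees extents past @start, then backs up and removes index
+ * blocks it has emptied on the way.
+ */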
+
+int ext4_ext_remove_space(struct inode *inode, unsigned long start)
+{
+       struct super_block *sb = inode->i_sb;
+       int depth = ext_depth(inode);
+       struct ext4_ext_path *path;
+       handle_t *handle;
+       int i = 0, err = 0;
+
+       ext_debug("truncate since %lu\n", start);
+
+       /* probably first extent we're gonna free will be last in block */
+       handle = ext4_journal_start(inode, depth + 1);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       ext4_ext_invalidate_cache(inode);
+
+       /*
+        * We start scanning from right side, freeing all the blocks
+        * after i_size and walking into the tree depth-wise.
+        */
+       path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
+       if (path == NULL) {
+               ext4_journal_stop(handle);
+               return -ENOMEM;
+       }
+       memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
+       path[0].p_hdr = ext_inode_hdr(inode);
+       if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
+               err = -EIO;
+               goto out;
+       }
+       path[0].p_depth = depth;
+
+       while (i >= 0 && err == 0) {
+               if (i == depth) {
+                       /* this is leaf block */
+                       err = ext4_ext_rm_leaf(handle, inode, path, start);
+                       /* root level has p_bh == NULL, brelse() eats this */
+                       brelse(path[i].p_bh);
+                       path[i].p_bh = NULL;
+                       i--;
+                       continue;
+               }
+
+               /* this is index block */
+               if (!path[i].p_hdr) {
+                       ext_debug("initialize header\n");
+                       path[i].p_hdr = ext_block_hdr(path[i].p_bh);
+                       if (ext4_ext_check_header(__FUNCTION__, inode,
+                                                       path[i].p_hdr)) {
+                               err = -EIO;
+                               goto out;
+                       }
+               }
+
+               BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
+                          > le16_to_cpu(path[i].p_hdr->eh_max));
+               BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);
+
+               if (!path[i].p_idx) {
+                       /* this level hasn't been touched yet */
+                       path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
+                       path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
+                       ext_debug("init index ptr: hdr 0x%p, num %d\n",
+                                 path[i].p_hdr,
+                                 le16_to_cpu(path[i].p_hdr->eh_entries));
+               } else {
+                       /* we were already here, move on to the next index */
+                       path[i].p_idx--;
+               }
+
+               ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
+                               i, EXT_FIRST_INDEX(path[i].p_hdr),
+                               path[i].p_idx);
+               if (ext4_ext_more_to_rm(path + i)) {
+                       /* go to the next level */
+                       ext_debug("move to level %d (block %llu)\n",
+                                 i + 1, idx_pblock(path[i].p_idx));
+                       memset(path + i + 1, 0, sizeof(*path));
+                       path[i+1].p_bh =
+                               sb_bread(sb, idx_pblock(path[i].p_idx));
+                       if (!path[i+1].p_bh) {
+                               /* should we reset i_size? */
+                               err = -EIO;
+                               break;
+                       }
+
+                       /* save actual number of indexes since this
+                        * number is changed at the next iteration */
+                       path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
+                       i++;
+               } else {
+                       /* we finished processing this index, go up */
+                       if (path[i].p_hdr->eh_entries == 0 && i > 0) {
+                               /* index is empty, remove it;
+                                * handle must be already prepared by
+                                * ext4_ext_rm_leaf() */
+                               err = ext4_ext_rm_idx(handle, inode, path + i);
+                       }
+                       /* root level has p_bh == NULL, brelse() eats this */
+                       brelse(path[i].p_bh);
+                       path[i].p_bh = NULL;
+                       i--;
+                       ext_debug("return to level %d\n", i);
+               }
+       }
+
+       /* TODO: flexible tree reduction should be here */
+       if (path->p_hdr->eh_entries == 0) {
+               /*
+                * truncate to zero freed all the tree,
+                * so we need to correct eh_depth
+                */
+               err = ext4_ext_get_access(handle, inode, path);
+               if (err == 0) {
+                       ext_inode_hdr(inode)->eh_depth = 0;
+                       ext_inode_hdr(inode)->eh_max =
+                               cpu_to_le16(ext4_ext_space_root(inode));
+                       err = ext4_ext_dirty(handle, inode, path);
+               }
+       }
+out:
+       ext4_ext_tree_changed(inode);
+       ext4_ext_drop_refs(path);
+       kfree(path);
+       ext4_journal_stop(handle);
+
+       return err;
+}
+
+/*
+ * called at mount time
+ */
+void ext4_ext_init(struct super_block *sb)
+{
+       /*
+        * possible initialization would be here
+        */
+
+       if (test_opt(sb, EXTENTS)) {
+               printk("EXT4-fs: file extents enabled");
+#ifdef AGRESSIVE_TEST
+               printk(", agressive tests");
+#endif
+#ifdef CHECK_BINSEARCH
+               printk(", check binsearch");
+#endif
+#ifdef EXTENTS_STATS
+               printk(", stats");
+#endif
+               printk("\n");
+#ifdef EXTENTS_STATS
+               spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
+               EXT4_SB(sb)->s_ext_min = 1 << 30;
+               EXT4_SB(sb)->s_ext_max = 0;
+#endif
+       }
+}
+
+/*
+ * called at umount time
+ */
+void ext4_ext_release(struct super_block *sb)
+{
+       if (!test_opt(sb, EXTENTS))
+               return;
+
+#ifdef EXTENTS_STATS
+       if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
+               struct ext4_sb_info *sbi = EXT4_SB(sb);
+               printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
+                       sbi->s_ext_blocks, sbi->s_ext_extents,
+                       sbi->s_ext_blocks / sbi->s_ext_extents);
+               printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
+                       sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
+       }
+#endif
+}
+
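+/*
+ * ext4_ext_get_blocks:
+ * maps @iblock to physical blocks; returns the number of blocks mapped
+ * (at most @max_blocks) or a negative errno. With @create == 0 a hole
+ * maps to 0 blocks and the gap is cached instead.
+ */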
+int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t iblock,
+                       unsigned long max_blocks, struct buffer_head *bh_result,
+                       int create, int extend_disksize)
+{
+       struct ext4_ext_path *path = NULL;
+       struct ext4_extent newex, *ex;
+       ext4_fsblk_t goal, newblock;
+       int err = 0, depth;
+       unsigned long allocated = 0;
+
+       __clear_bit(BH_New, &bh_result->b_state);
+       ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
+                       max_blocks, (unsigned) inode->i_ino);
+       mutex_lock(&EXT4_I(inode)->truncate_mutex);
+
+       /* check in cache */
+       if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
+               if (goal == EXT4_EXT_CACHE_GAP) {
+                       if (!create) {
+                               /* block isn't allocated yet and
+                                * user doesn't want to allocate it */
+                               goto out2;
+                       }
+                       /* we should allocate requested block */
+               } else if (goal == EXT4_EXT_CACHE_EXTENT) {
+                       /* block is already allocated */
+                       newblock = iblock
+                                  - le32_to_cpu(newex.ee_block)
+                                  + ext_pblock(&newex);
+                       /* number of remaining blocks in the extent */
+                       allocated = le16_to_cpu(newex.ee_len) -
+                                       (iblock - le32_to_cpu(newex.ee_block));
+                       goto out;
+               } else {
+                       BUG();
+               }
+       }
+
+       /* find extent for this block */
+       path = ext4_ext_find_extent(inode, iblock, NULL);
+       if (IS_ERR(path)) {
+               err = PTR_ERR(path);
+               path = NULL;
+               goto out2;
+       }
+
+       depth = ext_depth(inode);
+
+       /*
+        * consistent leaf must not be empty;
+        * this situation is possible, though, _during_ tree modification;
+        * this is why assert can't be put in ext4_ext_find_extent()
+        */
+       BUG_ON(path[depth].p_ext == NULL && depth != 0);
+
+       if ((ex = path[depth].p_ext)) {
+               unsigned long ee_block = le32_to_cpu(ex->ee_block);
+               ext4_fsblk_t ee_start = ext_pblock(ex);
+               unsigned short ee_len  = le16_to_cpu(ex->ee_len);
+
+               /*
+                * Allow future support for preallocated extents to be added
+                * as an RO_COMPAT feature:
+                * Uninitialized extents are treated as holes, except that
+                * we avoid (fail) allocating new blocks during a write.
+                */
+               if (ee_len > EXT_MAX_LEN)
+                       goto out2;
+               /* if found extent covers block, simply return it */
+               if (iblock >= ee_block && iblock < ee_block + ee_len) {
+                       newblock = iblock - ee_block + ee_start;
+                       /* number of remaining blocks in the extent */
+                       allocated = ee_len - (iblock - ee_block);
+                       ext_debug("%d fit into %lu:%d -> %llu\n", (int) iblock,
+                                       ee_block, ee_len, newblock);
+                       ext4_ext_put_in_cache(inode, ee_block, ee_len,
+                                               ee_start, EXT4_EXT_CACHE_EXTENT);
+                       goto out;
+               }
+       }
+
+       /*
+        * requested block isn't allocated yet;
+        * we must not create the block if the create flag is zero
+        */
+       if (!create) {
+               /* put just found gap into cache to speed up
+                * subsequent requests */
+               ext4_ext_put_gap_in_cache(inode, path, iblock);
+               goto out2;
+       }
+       /*
+        * Okay, we need to do block allocation.  Lazily initialize the block
+        * allocation info here if necessary.
+        */
+       if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
+               ext4_init_block_alloc_info(inode);
+
+       /* allocate new block */
+       goal = ext4_ext_find_goal(inode, path, iblock);
+       allocated = max_blocks;
+       newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
+       if (!newblock)
+               goto out2;
+       ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
+                       goal, newblock, allocated);
+
+       /* try to insert new extent into found leaf and return */
+       newex.ee_block = cpu_to_le32(iblock);
+       ext4_ext_store_pblock(&newex, newblock);
+       newex.ee_len = cpu_to_le16(allocated);
+       err = ext4_ext_insert_extent(handle, inode, path, &newex);
+       if (err)
+               goto out2;
+
+       if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
+               EXT4_I(inode)->i_disksize = inode->i_size;
+
+       /* the previous routine could have used the block we allocated */
+       newblock = ext_pblock(&newex);
+       __set_bit(BH_New, &bh_result->b_state);
+
+       ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
+                               EXT4_EXT_CACHE_EXTENT);
+out:
+       if (allocated > max_blocks)
+               allocated = max_blocks;
+       ext4_ext_show_leaf(inode, path);
+       __set_bit(BH_Mapped, &bh_result->b_state);
+       bh_result->b_bdev = inode->i_sb->s_bdev;
+       bh_result->b_blocknr = newblock;
+out2:
+       if (path) {
+               ext4_ext_drop_refs(path);
+               kfree(path);
+       }
+       mutex_unlock(&EXT4_I(inode)->truncate_mutex);
+
+       return err ? err : allocated;
+}
+
+void ext4_ext_truncate(struct inode * inode, struct page *page)
+{
+       struct address_space *mapping = inode->i_mapping;
+       struct super_block *sb = inode->i_sb;
+       unsigned long last_block;
+       handle_t *handle;
+       int err = 0;
+
+       /*
+        * the first extent we free will probably be the last one in its block
+        */
+       err = ext4_writepage_trans_blocks(inode) + 3;
+       handle = ext4_journal_start(inode, err);
+       if (IS_ERR(handle)) {
+               if (page) {
+                       clear_highpage(page);
+                       flush_dcache_page(page);
+                       unlock_page(page);
+                       page_cache_release(page);
+               }
+               return;
+       }
+
+       if (page)
+               ext4_block_truncate_page(handle, page, mapping, inode->i_size);
+
+       mutex_lock(&EXT4_I(inode)->truncate_mutex);
+       ext4_ext_invalidate_cache(inode);
+
+       /*
+        * TODO: optimization is possible here.
+        * We probably need not scan at all,
+        * because page truncation is enough.
+        */
+       if (ext4_orphan_add(handle, inode))
+               goto out_stop;
+
+       /* we have to know where to truncate from in the crash case */
+       EXT4_I(inode)->i_disksize = inode->i_size;
+       ext4_mark_inode_dirty(handle, inode);
+
+       last_block = (inode->i_size + sb->s_blocksize - 1)
+                       >> EXT4_BLOCK_SIZE_BITS(sb);
+       err = ext4_ext_remove_space(inode, last_block);
+
+       /* In a multi-transaction truncate, we only make the final
+        * transaction synchronous. */
+       if (IS_SYNC(inode))
+               handle->h_sync = 1;
+
+out_stop:
+       /*
+        * If this was a simple ftruncate() and the file will remain alive,
+        * then we need to clear up the orphan record which we created above.
+        * However, if this was a real unlink then we were called by
+        * ext4_delete_inode(), and we allow that function to clean up the
+        * orphan info for us.
+        */
+       if (inode->i_nlink)
+               ext4_orphan_del(handle, inode);
+
+       mutex_unlock(&EXT4_I(inode)->truncate_mutex);
+       ext4_journal_stop(handle);
+}
+
+/*
+ * ext4_ext_writepage_trans_blocks:
+ * calculate the maximum number of blocks we could modify
+ * while allocating a new block for an inode
+ */
+int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
+{
+       int needed;
+
+       needed = ext4_ext_calc_credits_for_insert(inode, NULL);
+
+       /* the caller wants to allocate num blocks; the per-insert
+        * estimate includes the sb, which only needs counting once */
+       needed = needed * num - (num - 1);
+
+#ifdef CONFIG_QUOTA
+       needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
+#endif
+
+       return needed;
+}
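+
+/*
+ * Worked example (illustrative): if ext4_ext_calc_credits_for_insert()
+ * estimates 8 credits for a single insert, then for num = 4 blocks we
+ * reserve 8 * 4 - (4 - 1) = 29 credits, because the superblock buffer
+ * is shared by all the inserts and only needs to be counted once.
+ */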
+
+EXPORT_SYMBOL(ext4_mark_inode_dirty);
+EXPORT_SYMBOL(ext4_ext_invalidate_cache);
+EXPORT_SYMBOL(ext4_ext_insert_extent);
+EXPORT_SYMBOL(ext4_ext_walk_space);
+EXPORT_SYMBOL(ext4_ext_find_goal);
+EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
+
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
new file mode 100644 (file)
index 0000000..0b622c0
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ *  linux/fs/ext4/file.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/file.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext4 fs regular file handling primitives
+ *
+ *  64-bit file support on 64-bit platforms by Jakub Jelinek
+ *     (jj@sunsite.ms.mff.cuni.cz)
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd2.h>
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Called when an inode is released. Note that this is different
+ * from ext4_file_open: open gets called at every open, but release
+ * gets called only when /all/ the files are closed.
+ */
+static int ext4_release_file (struct inode * inode, struct file * filp)
+{
+       /* if we are the last writer on the inode, drop the block reservation */
+       if ((filp->f_mode & FMODE_WRITE) &&
+                       (atomic_read(&inode->i_writecount) == 1))
+       {
+               mutex_lock(&EXT4_I(inode)->truncate_mutex);
+               ext4_discard_reservation(inode);
+               mutex_unlock(&EXT4_I(inode)->truncate_mutex);
+       }
+       if (is_dx(inode) && filp->private_data)
+               ext4_htree_free_dir_info(filp->private_data);
+
+       return 0;
+}
+
+static ssize_t
+ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_dentry->d_inode;
+       ssize_t ret;
+       int err;
+
+       ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+       /*
+        * Skip flushing if there was an error, or if nothing was written.
+        */
+       if (ret <= 0)
+               return ret;
+
+       /*
+        * If the inode is IS_SYNC, or is O_SYNC and we are doing data
+        * journalling, then we need to make sure that we force the transaction
+        * to disk to keep all metadata up to date synchronously.
+        */
+       if (file->f_flags & O_SYNC) {
+               /*
+                * If we are non-data-journaled, then the dirty data has
+                * already been flushed to backing store by generic_osync_inode,
+                * and the inode has been flushed too if there have been any
+                * modifications other than mere timestamp updates.
+                *
+                * Open question --- do we care about flushing timestamps too
+                * if the inode is IS_SYNC?
+                */
+               if (!ext4_should_journal_data(inode))
+                       return ret;
+
+               goto force_commit;
+       }
+
+       /*
+        * So we know that there has been no forced data flush.  If the inode
+        * is marked IS_SYNC, we need to force one ourselves.
+        */
+       if (!IS_SYNC(inode))
+               return ret;
+
+       /*
+        * Open question #2 --- should we force data to disk here too?  If we
+        * don't, the only impact is that data=writeback filesystems won't
+        * flush data to disk automatically on IS_SYNC, only metadata (but
+        * historically, that is what ext2 has done.)
+        */
+
+force_commit:
+       err = ext4_force_commit(inode->i_sb);
+       if (err)
+               return err;
+       return ret;
+}
+
+const struct file_operations ext4_file_operations = {
+       .llseek         = generic_file_llseek,
+       .read           = do_sync_read,
+       .write          = do_sync_write,
+       .aio_read       = generic_file_aio_read,
+       .aio_write      = ext4_file_write,
+       .ioctl          = ext4_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = ext4_compat_ioctl,
+#endif
+       .mmap           = generic_file_mmap,
+       .open           = generic_file_open,
+       .release        = ext4_release_file,
+       .fsync          = ext4_sync_file,
+       .sendfile       = generic_file_sendfile,
+       .splice_read    = generic_file_splice_read,
+       .splice_write   = generic_file_splice_write,
+};
+
+struct inode_operations ext4_file_inode_operations = {
+       .truncate       = ext4_truncate,
+       .setattr        = ext4_setattr,
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
+       .listxattr      = ext4_listxattr,
+       .removexattr    = generic_removexattr,
+#endif
+       .permission     = ext4_permission,
+};
+
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
new file mode 100644 (file)
index 0000000..2a167d7
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ *  linux/fs/ext4/fsync.c
+ *
+ *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
+ *  from
+ *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
+ *                      Laboratoire MASI - Institut Blaise Pascal
+ *                      Universite Pierre et Marie Curie (Paris VI)
+ *  from
+ *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext4fs fsync primitive
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *
+ *  Removed unnecessary code duplication for little endian machines
+ *  and excessive __inline__s.
+ *        Andi Kleen, 1997
+ *
+ * Major simplifications and cleanup - we only need to do the metadata, because
+ * we can depend on generic_block_fdatasync() to sync the data blocks.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd2.h>
+
+/*
+ * akpm: A new design for ext4_sync_file().
+ *
+ * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
+ * There cannot be a transaction open by this task.
+ * Another task could have dirtied this inode.  Its data can be in any
+ * state in the journalling system.
+ *
+ * What we do is just kick off a commit and wait on it.  This will snapshot the
+ * inode to disk.
+ */
+
+int ext4_sync_file(struct file * file, struct dentry *dentry, int datasync)
+{
+       struct inode *inode = dentry->d_inode;
+       int ret = 0;
+
+       J_ASSERT(ext4_journal_current_handle() == 0);
+
+       /*
+        * data=writeback:
+        *  The caller's filemap_fdatawrite()/wait will sync the data.
+        *  sync_inode() will sync the metadata
+        *
+        * data=ordered:
+        *  The caller's filemap_fdatawrite() will write the data and
+        *  sync_inode() will write the inode if it is dirty.  Then the caller's
+        *  filemap_fdatawait() will wait on the pages.
+        *
+        * data=journal:
+        *  filemap_fdatawrite won't do anything (the buffers are clean).
+        *  ext4_force_commit will write the file data into the journal and
+        *  will wait on that.
+        *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
+        *  (they were dirtied by commit).  But that's OK - the blocks are
+        *  safe in-journal, which is all fsync() needs to ensure.
+        */
+       if (ext4_should_journal_data(inode)) {
+               ret = ext4_force_commit(inode->i_sb);
+               goto out;
+       }
+
+       /*
+        * The VFS has written the file data.  If the inode is unaltered
+        * then we need not start a commit.
+        */
+       if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) {
+               struct writeback_control wbc = {
+                       .sync_mode = WB_SYNC_ALL,
+                       .nr_to_write = 0, /* sys_fsync did this */
+               };
+               ret = sync_inode(inode, &wbc);
+       }
+out:
+       return ret;
+}
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
new file mode 100644 (file)
index 0000000..a679663
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ *  linux/fs/ext4/hash.c
+ *
+ * Copyright (C) 2002 by Theodore Ts'o
+ *
+ * This file is released under the GPL v2.
+ *
+ * This file may be redistributed under the terms of the GNU Public
+ * License.
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/sched.h>
+#include <linux/ext4_fs.h>
+#include <linux/cryptohash.h>
+
+#define DELTA 0x9E3779B9
+
+static void TEA_transform(__u32 buf[4], __u32 const in[])
+{
+       __u32   sum = 0;
+       __u32   b0 = buf[0], b1 = buf[1];
+       __u32   a = in[0], b = in[1], c = in[2], d = in[3];
+       int     n = 16;
+
+       do {
+               sum += DELTA;
+               b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
+               b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
+       } while(--n);
+
+       buf[0] += b0;
+       buf[1] += b1;
+}
+
+
+/* The old legacy hash */
+static __u32 dx_hack_hash (const char *name, int len)
+{
+       __u32 hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
+       while (len--) {
+               __u32 hash = hash1 + (hash0 ^ (*name++ * 7152373));
+
+               if (hash & 0x80000000) hash -= 0x7fffffff;
+               hash1 = hash0;
+               hash0 = hash;
+       }
+       return (hash0 << 1);
+}
+
+static void str2hashbuf(const char *msg, int len, __u32 *buf, int num)
+{
+       __u32   pad, val;
+       int     i;
+
+       pad = (__u32)len | ((__u32)len << 8);
+       pad |= pad << 16;
+
+       val = pad;
+       if (len > num*4)
+               len = num * 4;
+       for (i=0; i < len; i++) {
+               if ((i % 4) == 0)
+                       val = pad;
+               val = msg[i] + (val << 8);
+               if ((i % 4) == 3) {
+                       *buf++ = val;
+                       val = pad;
+                       num--;
+               }
+       }
+       if (--num >= 0)
+               *buf++ = val;
+       while (--num >= 0)
+               *buf++ = pad;
+}
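+
+/*
+ * Worked example (illustrative): str2hashbuf("abc", 3, buf, 4) derives
+ * pad = 0x03030303 from the length and yields
+ * buf = { 0x03616263, 0x03030303, 0x03030303, 0x03030303 }; the pad
+ * fills both the top byte of the partial word and the unused words, so
+ * names of different lengths hash differently even when one is a prefix
+ * of the other.
+ */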
+
+/*
+ * Returns the hash of a filename.  If len is 0 and name is NULL, then
+ * this function can be used to test whether or not a hash version is
+ * supported.
+ *
+ * The seed is an 4 longword (32 bits) "secret" which can be used to
+ * uniquify a hash.  If the seed is all zero's, then some default seed
+ * may be used.
+ *
+ * A particular hash version specifies whether or not the seed is
+ * represented, and whether or not the returned hash is 32 bits or 64
+ * bits.  32 bit hashes will return 0 for the minor hash.
+ */
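+/*
+ * Usage sketch (illustrative; struct dx_hash_info is declared in
+ * ext4_fs.h and is filled in by the htree directory code):
+ *
+ *        struct dx_hash_info hinfo;
+ *
+ *        hinfo.hash_version = DX_HASH_TEA;
+ *        hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+ *        err = ext4fs_dirhash(name, namelen, &hinfo);
+ *
+ * On success, hinfo.hash (plus hinfo.minor_hash for the 64-bit
+ * versions) selects the htree leaf block for this name.
+ */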
+int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
+{
+       __u32   hash;
+       __u32   minor_hash = 0;
+       const char      *p;
+       int             i;
+       __u32           in[8], buf[4];
+
+       /* Initialize the default seed for the hash checksum functions */
+       buf[0] = 0x67452301;
+       buf[1] = 0xefcdab89;
+       buf[2] = 0x98badcfe;
+       buf[3] = 0x10325476;
+
+       /* Check to see if the seed is all zeros */
+       if (hinfo->seed) {
+               for (i=0; i < 4; i++) {
+                       if (hinfo->seed[i])
+                               break;
+               }
+               if (i < 4)
+                       memcpy(buf, hinfo->seed, sizeof(buf));
+       }
+
+       switch (hinfo->hash_version) {
+       case DX_HASH_LEGACY:
+               hash = dx_hack_hash(name, len);
+               break;
+       case DX_HASH_HALF_MD4:
+               p = name;
+               while (len > 0) {
+                       str2hashbuf(p, len, in, 8);
+                       half_md4_transform(buf, in);
+                       len -= 32;
+                       p += 32;
+               }
+               minor_hash = buf[2];
+               hash = buf[1];
+               break;
+       case DX_HASH_TEA:
+               p = name;
+               while (len > 0) {
+                       str2hashbuf(p, len, in, 4);
+                       TEA_transform(buf, in);
+                       len -= 16;
+                       p += 16;
+               }
+               hash = buf[0];
+               minor_hash = buf[1];
+               break;
+       default:
+               hinfo->hash = 0;
+               return -1;
+       }
+       hash = hash & ~1;
+       if (hash == (EXT4_HTREE_EOF << 1))
+               hash = (EXT4_HTREE_EOF-1) << 1;
+       hinfo->hash = hash;
+       hinfo->minor_hash = minor_hash;
+       return 0;
+}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
new file mode 100644 (file)
index 0000000..c88b439
--- /dev/null
@@ -0,0 +1,772 @@
+/*
+ *  linux/fs/ext4/ialloc.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  BSD ufs-inspired inode and directory allocation by
+ *  Stephen Tweedie (sct@redhat.com), 1993
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/random.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <asm/byteorder.h>
+
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * ialloc.c contains the inodes allocation and deallocation routines
+ */
+
+/*
+ * The free inodes are managed by bitmaps.  A file system contains several
+ * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
+ * block for inodes, N blocks for the inode table and data blocks.
+ *
+ * The file system contains group descriptors which are located after the
+ * super block.  Each descriptor contains the number of the bitmap block and
+ * the free blocks count in the block.
+ */
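+
+/*
+ * Worked example (illustrative, assuming EXT4_INODES_PER_GROUP(sb) is
+ * 8192): inode 12345 lives in block group (12345 - 1) / 8192 = 1, at
+ * bit (12345 - 1) % 8192 = 4152 of that group's inode bitmap; this is
+ * the same arithmetic used by ext4_free_inode() and ext4_orphan_get()
+ * below.
+ */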
+
+
+/*
+ * Read the inode allocation bitmap for a given block_group, reading
+ * into the specified slot in the superblock's bitmap cache.
+ *
+ * Return buffer_head of bitmap on success or NULL.
+ */
+static struct buffer_head *
+read_inode_bitmap(struct super_block * sb, unsigned long block_group)
+{
+       struct ext4_group_desc *desc;
+       struct buffer_head *bh = NULL;
+
+       desc = ext4_get_group_desc(sb, block_group, NULL);
+       if (!desc)
+               goto error_out;
+
+       bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
+       if (!bh)
+               ext4_error(sb, "read_inode_bitmap",
+                           "Cannot read inode bitmap - "
+                           "block_group = %lu, inode_bitmap = %llu",
+                           block_group, ext4_inode_bitmap(sb, desc));
+error_out:
+       return bh;
+}
+
+/*
+ * NOTE! When we get the inode, we're the only people
+ * that have access to it, and as such there are no
+ * race conditions we have to worry about. The inode
+ * is not on the hash-lists, and it cannot be reached
+ * through the filesystem because the directory entry
+ * has been deleted earlier.
+ *
+ * HOWEVER: we must make sure that we get no aliases,
+ * which means that we have to call "clear_inode()"
+ * _before_ we mark the inode not in use in the inode
+ * bitmaps. Otherwise a newly created file might use
+ * the same inode number (not actually the same pointer
+ * though), and then we'd have two inodes sharing the
+ * same inode number and space on the hard disk.
+ */
+void ext4_free_inode (handle_t *handle, struct inode * inode)
+{
+       struct super_block * sb = inode->i_sb;
+       int is_directory;
+       unsigned long ino;
+       struct buffer_head *bitmap_bh = NULL;
+       struct buffer_head *bh2;
+       unsigned long block_group;
+       unsigned long bit;
+       struct ext4_group_desc * gdp;
+       struct ext4_super_block * es;
+       struct ext4_sb_info *sbi;
+       int fatal = 0, err;
+
+       if (atomic_read(&inode->i_count) > 1) {
+               printk ("ext4_free_inode: inode has count=%d\n",
+                                       atomic_read(&inode->i_count));
+               return;
+       }
+       if (inode->i_nlink) {
+               printk ("ext4_free_inode: inode has nlink=%d\n",
+                       inode->i_nlink);
+               return;
+       }
+       if (!sb) {
+               printk("ext4_free_inode: inode on nonexistent device\n");
+               return;
+       }
+       sbi = EXT4_SB(sb);
+
+       ino = inode->i_ino;
+       ext4_debug ("freeing inode %lu\n", ino);
+
+       /*
+        * Note: we must free any quota before locking the superblock,
+        * as writing the quota to disk may need the lock as well.
+        */
+       DQUOT_INIT(inode);
+       ext4_xattr_delete_inode(handle, inode);
+       DQUOT_FREE_INODE(inode);
+       DQUOT_DROP(inode);
+
+       is_directory = S_ISDIR(inode->i_mode);
+
+       /* Do this BEFORE marking the inode not in use or returning an error */
+       clear_inode (inode);
+
+       es = EXT4_SB(sb)->s_es;
+       if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+               ext4_error (sb, "ext4_free_inode",
+                           "reserved or nonexistent inode %lu", ino);
+               goto error_return;
+       }
+       block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
+       bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
+       bitmap_bh = read_inode_bitmap(sb, block_group);
+       if (!bitmap_bh)
+               goto error_return;
+
+       BUFFER_TRACE(bitmap_bh, "get_write_access");
+       fatal = ext4_journal_get_write_access(handle, bitmap_bh);
+       if (fatal)
+               goto error_return;
+
+       /* Ok, now we can actually update the inode bitmaps.. */
+       if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+                                       bit, bitmap_bh->b_data))
+               ext4_error (sb, "ext4_free_inode",
+                             "bit already cleared for inode %lu", ino);
+       else {
+               gdp = ext4_get_group_desc (sb, block_group, &bh2);
+
+               BUFFER_TRACE(bh2, "get_write_access");
+               fatal = ext4_journal_get_write_access(handle, bh2);
+               if (fatal) goto error_return;
+
+               if (gdp) {
+                       spin_lock(sb_bgl_lock(sbi, block_group));
+                       gdp->bg_free_inodes_count = cpu_to_le16(
+                               le16_to_cpu(gdp->bg_free_inodes_count) + 1);
+                       if (is_directory)
+                               gdp->bg_used_dirs_count = cpu_to_le16(
+                                 le16_to_cpu(gdp->bg_used_dirs_count) - 1);
+                       spin_unlock(sb_bgl_lock(sbi, block_group));
+                       percpu_counter_inc(&sbi->s_freeinodes_counter);
+                       if (is_directory)
+                               percpu_counter_dec(&sbi->s_dirs_counter);
+
+               }
+               BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
+               err = ext4_journal_dirty_metadata(handle, bh2);
+               if (!fatal) fatal = err;
+       }
+       BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
+       err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+       if (!fatal)
+               fatal = err;
+       sb->s_dirt = 1;
+error_return:
+       brelse(bitmap_bh);
+       ext4_std_error(sb, fatal);
+}
+
+/*
+ * There are two policies for allocating an inode.  If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, the
+ * group with the fewest directories is chosen from among the groups
+ * with above-average free space.
+ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+static int find_group_dir(struct super_block *sb, struct inode *parent)
+{
+       int ngroups = EXT4_SB(sb)->s_groups_count;
+       unsigned int freei, avefreei;
+       struct ext4_group_desc *desc, *best_desc = NULL;
+       struct buffer_head *bh;
+       int group, best_group = -1;
+
+       freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
+       avefreei = freei / ngroups;
+
+       for (group = 0; group < ngroups; group++) {
+               desc = ext4_get_group_desc (sb, group, &bh);
+               if (!desc || !desc->bg_free_inodes_count)
+                       continue;
+               if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+                       continue;
+               if (!best_desc ||
+                   (le16_to_cpu(desc->bg_free_blocks_count) >
+                    le16_to_cpu(best_desc->bg_free_blocks_count))) {
+                       best_group = group;
+                       best_desc = desc;
+               }
+       }
+       return best_group;
+}
+
+/*
+ * Orlov's allocator for directories.
+ *
+ * We always try to spread first-level directories.
+ *
+ * If there are blockgroups with both free-inode and free-block counts
+ * no worse than average, we return the one with the smallest directory
+ * count.  Otherwise we simply return a random group.
+ *
+ * For the remaining cases the rules are:
+ *
+ * It's OK to put a directory into a group unless
+ * it has too many directories already (max_dirs) or
+ * it has too few free inodes left (min_inodes) or
+ * it has too few free blocks left (min_blocks) or
+ * it is already running too large a debt (max_debt).
+ * The parent's group is preferred; if it doesn't satisfy these
+ * conditions we search cyclically through the rest.  If none
+ * of the groups looks good we just look for a group with more
+ * free inodes than average (starting at the parent's group).
+ *
+ * Debt is incremented each time we allocate a directory and decremented
+ * when we allocate an inode, and is kept within 0--255.
+ */
+
+#define INODE_COST 64
+#define BLOCK_COST 256
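+
+/*
+ * Worked example (illustrative): on a filesystem with 32768 blocks and
+ * 16384 inodes per group where directories average 512 blocks each,
+ * max_debt = 32768 / max(512, BLOCK_COST) = 64; 64 * INODE_COST = 4096
+ * does not exceed 16384 inodes, and 64 already lies within [1, 255],
+ * so such a group may run a debt of up to 64 before it is skipped.
+ */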
+
+static int find_group_orlov(struct super_block *sb, struct inode *parent)
+{
+       int parent_group = EXT4_I(parent)->i_block_group;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       int ngroups = sbi->s_groups_count;
+       int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
+       unsigned int freei, avefreei;
+       ext4_fsblk_t freeb, avefreeb;
+       ext4_fsblk_t blocks_per_dir;
+       unsigned int ndirs;
+       int max_debt, max_dirs, min_inodes;
+       ext4_grpblk_t min_blocks;
+       int group = -1, i;
+       struct ext4_group_desc *desc;
+       struct buffer_head *bh;
+
+       freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
+       avefreei = freei / ngroups;
+       freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+       avefreeb = freeb;
+       do_div(avefreeb, ngroups);
+       ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
+
+       if ((parent == sb->s_root->d_inode) ||
+           (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
+               int best_ndir = inodes_per_group;
+               int best_group = -1;
+
+               get_random_bytes(&group, sizeof(group));
+               parent_group = (unsigned)group % ngroups;
+               for (i = 0; i < ngroups; i++) {
+                       group = (parent_group + i) % ngroups;
+                       desc = ext4_get_group_desc (sb, group, &bh);
+                       if (!desc || !desc->bg_free_inodes_count)
+                               continue;
+                       if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
+                               continue;
+                       if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
+                               continue;
+                       if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
+                               continue;
+                       best_group = group;
+                       best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
+               }
+               if (best_group >= 0)
+                       return best_group;
+               goto fallback;
+       }
+
+       blocks_per_dir = ext4_blocks_count(es) - freeb;
+       do_div(blocks_per_dir, ndirs);
+
+       max_dirs = ndirs / ngroups + inodes_per_group / 16;
+       min_inodes = avefreei - inodes_per_group / 4;
+       min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;
+
+       max_debt = EXT4_BLOCKS_PER_GROUP(sb);
+       max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
+       if (max_debt * INODE_COST > inodes_per_group)
+               max_debt = inodes_per_group / INODE_COST;
+       if (max_debt > 255)
+               max_debt = 255;
+       if (max_debt == 0)
+               max_debt = 1;
+
+       for (i = 0; i < ngroups; i++) {
+               group = (parent_group + i) % ngroups;
+               desc = ext4_get_group_desc (sb, group, &bh);
+               if (!desc || !desc->bg_free_inodes_count)
+                       continue;
+               if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
+                       continue;
+               if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
+                       continue;
+               if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
+                       continue;
+               return group;
+       }
+
+fallback:
+       for (i = 0; i < ngroups; i++) {
+               group = (parent_group + i) % ngroups;
+               desc = ext4_get_group_desc (sb, group, &bh);
+               if (!desc || !desc->bg_free_inodes_count)
+                       continue;
+               if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
+                       return group;
+       }
+
+       if (avefreei) {
+               /*
+                * The free-inodes counter is approximate, and for really small
+                * filesystems the above test can fail to find any blockgroups.
+                */
+               avefreei = 0;
+               goto fallback;
+       }
+
+       return -1;
+}
+
+static int find_group_other(struct super_block *sb, struct inode *parent)
+{
+       int parent_group = EXT4_I(parent)->i_block_group;
+       int ngroups = EXT4_SB(sb)->s_groups_count;
+       struct ext4_group_desc *desc;
+       struct buffer_head *bh;
+       int group, i;
+
+       /*
+        * Try to place the inode in its parent directory
+        */
+       group = parent_group;
+       desc = ext4_get_group_desc (sb, group, &bh);
+       if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+                       le16_to_cpu(desc->bg_free_blocks_count))
+               return group;
+
+       /*
+        * We're going to place this inode in a different blockgroup from its
+        * parent.  We want to cause files in a common directory to all land in
+        * the same blockgroup.  But we want files which are in a different
+        * directory which shares a blockgroup with our parent to land in a
+        * different blockgroup.
+        *
+        * So add our directory's i_ino into the starting point for the hash.
+        */
+       group = (group + parent->i_ino) % ngroups;
+
+       /*
+        * Use a quadratic hash to find a group with a free inode and some free
+        * blocks.
+        */
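+       /*
+        * Example (illustrative): with ngroups = 16 and a starting group
+        * of 5, the probe visits groups 6, 8, 12 and then 4 (i = 1, 2,
+        * 4, 8, wrapping modulo ngroups), so allocations fan out quickly
+        * instead of piling into adjacent groups.
+        */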
+       for (i = 1; i < ngroups; i <<= 1) {
+               group += i;
+               if (group >= ngroups)
+                       group -= ngroups;
+               desc = ext4_get_group_desc (sb, group, &bh);
+               if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
+                               le16_to_cpu(desc->bg_free_blocks_count))
+                       return group;
+       }
+
+       /*
+        * That failed: try linear search for a free inode, even if that group
+        * has no free blocks.
+        */
+       group = parent_group;
+       for (i = 0; i < ngroups; i++) {
+               if (++group >= ngroups)
+                       group = 0;
+               desc = ext4_get_group_desc (sb, group, &bh);
+               if (desc && le16_to_cpu(desc->bg_free_inodes_count))
+                       return group;
+       }
+
+       return -1;
+}
+
+/*
+ * There are two policies for allocating an inode.  If the new inode is
+ * a directory, then a forward search is made for a block group with both
+ * free space and a low directory-to-inode ratio; if that fails, the
+ * group with the fewest directories is chosen from among the groups
+ * with above-average free space.
+ *
+ * For other inodes, search forward from the parent directory's block
+ * group to find a free inode.
+ */
+struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
+{
+       struct super_block *sb;
+       struct buffer_head *bitmap_bh = NULL;
+       struct buffer_head *bh2;
+       int group;
+       unsigned long ino = 0;
+       struct inode * inode;
+       struct ext4_group_desc * gdp = NULL;
+       struct ext4_super_block * es;
+       struct ext4_inode_info *ei;
+       struct ext4_sb_info *sbi;
+       int err = 0;
+       struct inode *ret;
+       int i;
+
+       /* Cannot create files in a deleted directory */
+       if (!dir || !dir->i_nlink)
+               return ERR_PTR(-EPERM);
+
+       sb = dir->i_sb;
+       inode = new_inode(sb);
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
+       ei = EXT4_I(inode);
+
+       sbi = EXT4_SB(sb);
+       es = sbi->s_es;
+       if (S_ISDIR(mode)) {
+               if (test_opt (sb, OLDALLOC))
+                       group = find_group_dir(sb, dir);
+               else
+                       group = find_group_orlov(sb, dir);
+       } else
+               group = find_group_other(sb, dir);
+
+       err = -ENOSPC;
+       if (group == -1)
+               goto out;
+
+       for (i = 0; i < sbi->s_groups_count; i++) {
+               err = -EIO;
+
+               gdp = ext4_get_group_desc(sb, group, &bh2);
+               if (!gdp)
+                       goto fail;
+
+               brelse(bitmap_bh);
+               bitmap_bh = read_inode_bitmap(sb, group);
+               if (!bitmap_bh)
+                       goto fail;
+
+               ino = 0;
+
+repeat_in_this_group:
+               ino = ext4_find_next_zero_bit((unsigned long *)
+                               bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
+               if (ino < EXT4_INODES_PER_GROUP(sb)) {
+
+                       BUFFER_TRACE(bitmap_bh, "get_write_access");
+                       err = ext4_journal_get_write_access(handle, bitmap_bh);
+                       if (err)
+                               goto fail;
+
+                       if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
+                                               ino, bitmap_bh->b_data)) {
+                               /* we won it */
+                               BUFFER_TRACE(bitmap_bh,
+                                       "call ext4_journal_dirty_metadata");
+                               err = ext4_journal_dirty_metadata(handle,
+                                                               bitmap_bh);
+                               if (err)
+                                       goto fail;
+                               goto got;
+                       }
+                       /* we lost it */
+                       jbd2_journal_release_buffer(handle, bitmap_bh);
+
+                       if (++ino < EXT4_INODES_PER_GROUP(sb))
+                               goto repeat_in_this_group;
+               }
+
+               /*
+                * This case is possible in a concurrent environment.  It is
+                * very rare.  We cannot repeat the find_group_xxx() call
+                * because that will simply return the same blockgroup, as the
+                * group descriptor metadata has not yet been updated.
+                * So we just go on to the next blockgroup.
+                */
+               if (++group == sbi->s_groups_count)
+                       group = 0;
+       }
+       err = -ENOSPC;
+       goto out;
+
+got:
+       ino += group * EXT4_INODES_PER_GROUP(sb) + 1;
+       if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
+               ext4_error (sb, "ext4_new_inode",
+                           "reserved inode or inode > inodes count - "
+                           "block_group = %d, inode=%lu", group, ino);
+               err = -EIO;
+               goto fail;
+       }
+
+       BUFFER_TRACE(bh2, "get_write_access");
+       err = ext4_journal_get_write_access(handle, bh2);
+       if (err) goto fail;
+       spin_lock(sb_bgl_lock(sbi, group));
+       gdp->bg_free_inodes_count =
+               cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+       if (S_ISDIR(mode)) {
+               gdp->bg_used_dirs_count =
+                       cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+       }
+       spin_unlock(sb_bgl_lock(sbi, group));
+       BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
+       err = ext4_journal_dirty_metadata(handle, bh2);
+       if (err) goto fail;
+
+       percpu_counter_dec(&sbi->s_freeinodes_counter);
+       if (S_ISDIR(mode))
+               percpu_counter_inc(&sbi->s_dirs_counter);
+       sb->s_dirt = 1;
+
+       inode->i_uid = current->fsuid;
+       if (test_opt (sb, GRPID))
+               inode->i_gid = dir->i_gid;
+       else if (dir->i_mode & S_ISGID) {
+               inode->i_gid = dir->i_gid;
+               if (S_ISDIR(mode))
+                       mode |= S_ISGID;
+       } else
+               inode->i_gid = current->fsgid;
+       inode->i_mode = mode;
+
+       inode->i_ino = ino;
+       /* This is the optimal IO size (for stat), not the fs block size */
+       inode->i_blocks = 0;
+       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
+
+       memset(ei->i_data, 0, sizeof(ei->i_data));
+       ei->i_dir_start_lookup = 0;
+       ei->i_disksize = 0;
+
+       ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL;
+       if (S_ISLNK(mode))
+               ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
+       /* dirsync only applies to directories */
+       if (!S_ISDIR(mode))
+               ei->i_flags &= ~EXT4_DIRSYNC_FL;
+#ifdef EXT4_FRAGMENTS
+       ei->i_faddr = 0;
+       ei->i_frag_no = 0;
+       ei->i_frag_size = 0;
+#endif
+       ei->i_file_acl = 0;
+       ei->i_dir_acl = 0;
+       ei->i_dtime = 0;
+       ei->i_block_alloc_info = NULL;
+       ei->i_block_group = group;
+
+       ext4_set_inode_flags(inode);
+       if (IS_DIRSYNC(inode))
+               handle->h_sync = 1;
+       insert_inode_hash(inode);
+       spin_lock(&sbi->s_next_gen_lock);
+       inode->i_generation = sbi->s_next_generation++;
+       spin_unlock(&sbi->s_next_gen_lock);
+
+       ei->i_state = EXT4_STATE_NEW;
+       ei->i_extra_isize =
+               (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) ?
+               sizeof(struct ext4_inode) - EXT4_GOOD_OLD_INODE_SIZE : 0;
+
+       ret = inode;
+       if(DQUOT_ALLOC_INODE(inode)) {
+               err = -EDQUOT;
+               goto fail_drop;
+       }
+
+       err = ext4_init_acl(handle, inode, dir);
+       if (err)
+               goto fail_free_drop;
+
+       err = ext4_init_security(handle, inode, dir);
+       if (err)
+               goto fail_free_drop;
+
+       err = ext4_mark_inode_dirty(handle, inode);
+       if (err) {
+               ext4_std_error(sb, err);
+               goto fail_free_drop;
+       }
+       if (test_opt(sb, EXTENTS)) {
+               EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
+               ext4_ext_tree_init(handle, inode);
+               if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
+                       err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+                       if (err) goto fail;
+                       EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS);
+                       BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "call ext4_journal_dirty_metadata");
+                       err = ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+               }
+       }
+
+       ext4_debug("allocating inode %lu\n", inode->i_ino);
+       goto really_out;
+fail:
+       ext4_std_error(sb, err);
+out:
+       iput(inode);
+       ret = ERR_PTR(err);
+really_out:
+       brelse(bitmap_bh);
+       return ret;
+
+fail_free_drop:
+       DQUOT_FREE_INODE(inode);
+
+fail_drop:
+       DQUOT_DROP(inode);
+       inode->i_flags |= S_NOQUOTA;
+       inode->i_nlink = 0;
+       iput(inode);
+       brelse(bitmap_bh);
+       return ERR_PTR(err);
+}
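+
+/*
+ * Caller sketch (illustrative): the namei code allocates an inode
+ * roughly as
+ *
+ *        handle = ext4_journal_start(dir, credits);
+ *        inode = ext4_new_inode(handle, dir, mode);
+ *        if (IS_ERR(inode))
+ *                return PTR_ERR(inode);
+ *
+ * and then sets i_op/i_fop and links the new inode into the parent
+ * directory, all under the same handle.
+ */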
+
+/* Verify that we are loading a valid orphan from disk */
+struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+{
+       unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
+       unsigned long block_group;
+       int bit;
+       struct buffer_head *bitmap_bh = NULL;
+       struct inode *inode = NULL;
+
+       /* Error cases - e2fsck has already cleaned up for us */
+       if (ino > max_ino) {
+               ext4_warning(sb, __FUNCTION__,
+                            "bad orphan ino %lu!  e2fsck was run?", ino);
+               goto out;
+       }
+
+       block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
+       bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
+       bitmap_bh = read_inode_bitmap(sb, block_group);
+       if (!bitmap_bh) {
+               ext4_warning(sb, __FUNCTION__,
+                            "inode bitmap error for orphan %lu", ino);
+               goto out;
+       }
+
+       /* Having the inode bit set should be a 100% indicator that this
+        * is a valid orphan (no e2fsck run on fs).  Orphans also include
+        * inodes that were being truncated, so we can't check i_nlink==0.
+        */
+       if (!ext4_test_bit(bit, bitmap_bh->b_data) ||
+                       !(inode = iget(sb, ino)) || is_bad_inode(inode) ||
+                       NEXT_ORPHAN(inode) > max_ino) {
+               ext4_warning(sb, __FUNCTION__,
+                            "bad orphan inode %lu!  e2fsck was run?", ino);
+               printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+                      bit, (unsigned long long)bitmap_bh->b_blocknr,
+                      ext4_test_bit(bit, bitmap_bh->b_data));
+               printk(KERN_NOTICE "inode=%p\n", inode);
+               if (inode) {
+                       printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
+                              is_bad_inode(inode));
+                       printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
+                              NEXT_ORPHAN(inode));
+                       printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
+               }
+               /* Avoid freeing blocks if we got a bad deleted inode */
+               if (inode && inode->i_nlink == 0)
+                       inode->i_blocks = 0;
+               iput(inode);
+               inode = NULL;
+       }
+out:
+       brelse(bitmap_bh);
+       return inode;
+}
+
+unsigned long ext4_count_free_inodes (struct super_block * sb)
+{
+       unsigned long desc_count;
+       struct ext4_group_desc *gdp;
+       int i;
+#ifdef EXT4FS_DEBUG
+       struct ext4_super_block *es;
+       unsigned long bitmap_count, x;
+       struct buffer_head *bitmap_bh = NULL;
+
+       es = EXT4_SB(sb)->s_es;
+       desc_count = 0;
+       bitmap_count = 0;
+       gdp = NULL;
+       for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
+               gdp = ext4_get_group_desc (sb, i, NULL);
+               if (!gdp)
+                       continue;
+               desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
+               brelse(bitmap_bh);
+               bitmap_bh = read_inode_bitmap(sb, i);
+               if (!bitmap_bh)
+                       continue;
+
+               x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
+               printk("group %d: stored = %d, counted = %lu\n",
+                       i, le16_to_cpu(gdp->bg_free_inodes_count), x);
+               bitmap_count += x;
+       }
+       brelse(bitmap_bh);
+       printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
+               le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
+       return desc_count;
+#else
+       desc_count = 0;
+       for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
+               gdp = ext4_get_group_desc (sb, i, NULL);
+               if (!gdp)
+                       continue;
+               desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
+               cond_resched();
+       }
+       return desc_count;
+#endif
+}
+
+/* Called at mount-time, super-block is locked */
+unsigned long ext4_count_dirs (struct super_block * sb)
+{
+       unsigned long count = 0;
+       int i;
+
+       for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
+               struct ext4_group_desc *gdp = ext4_get_group_desc (sb, i, NULL);
+               if (!gdp)
+                       continue;
+               count += le16_to_cpu(gdp->bg_used_dirs_count);
+       }
+       return count;
+}
+
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
new file mode 100644 (file)
index 0000000..0a60ec5
--- /dev/null
@@ -0,0 +1,3233 @@
+/*
+ *  linux/fs/ext4/inode.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Goal-directed block allocation by Stephen Tweedie
+ *     (sct@redhat.com), 1993, 1998
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *  64-bit file support on 64-bit platforms by Jakub Jelinek
+ *     (jj@sunsite.ms.mff.cuni.cz)
+ *
+ *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/jbd2.h>
+#include <linux/smp_lock.h>
+#include <linux/highuid.h>
+#include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/string.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/mpage.h>
+#include <linux/uio.h>
+#include <linux/bio.h>
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * Test whether an inode is a fast symlink.
+ */
+static int ext4_inode_is_fast_symlink(struct inode *inode)
+{
+       int ea_blocks = EXT4_I(inode)->i_file_acl ?
+               (inode->i_sb->s_blocksize >> 9) : 0;
+
+       return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
+}
+
+/*
+ * The ext4 forget function must perform a revoke if we are freeing data
+ * which has been journaled.  Metadata (e.g. indirect blocks) must be
+ * revoked in all cases.
+ *
+ * "bh" may be NULL: a metadata block may have been freed from memory
+ * but there may still be a record of it in the journal, and that record
+ * still needs to be revoked.
+ */
+int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
+                       struct buffer_head *bh, ext4_fsblk_t blocknr)
+{
+       int err;
+
+       might_sleep();
+
+       BUFFER_TRACE(bh, "enter");
+
+       jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
+                 "data mode %lx\n",
+                 bh, is_metadata, inode->i_mode,
+                 test_opt(inode->i_sb, DATA_FLAGS));
+
+       /* Never use the revoke function if we are doing full data
+        * journaling: there is no need to, and a V1 superblock won't
+        * support it.  Otherwise, only skip the revoke on un-journaled
+        * data blocks. */
+
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
+           (!is_metadata && !ext4_should_journal_data(inode))) {
+               if (bh) {
+                       BUFFER_TRACE(bh, "call jbd2_journal_forget");
+                       return ext4_journal_forget(handle, bh);
+               }
+               return 0;
+       }
+
+       /*
+        * data!=journal && (is_metadata || should_journal_data(inode))
+        */
+       BUFFER_TRACE(bh, "call ext4_journal_revoke");
+       err = ext4_journal_revoke(handle, blocknr, bh);
+       if (err)
+               ext4_abort(inode->i_sb, __FUNCTION__,
+                          "error %d when attempting revoke", err);
+       BUFFER_TRACE(bh, "exit");
+       return err;
+}
+
+/*
+ * Work out how many blocks we need to proceed with the next chunk of a
+ * truncate transaction.
+ */
+static unsigned long blocks_for_truncate(struct inode *inode)
+{
+       unsigned long needed;
+
+       needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
+
+       /* Give ourselves just enough room to cope with inodes in which
+        * i_blocks is corrupt: we've seen disk corruptions in the past
+        * which resulted in random data in an inode which looked enough
+        * like a regular file for ext4 to try to delete it.  Things
+        * will go a bit crazy if that happens, but at least we should
+        * try not to panic the whole kernel. */
+       if (needed < 2)
+               needed = 2;
+
+       /* But we need to bound the transaction so we don't overflow the
+        * journal. */
+       if (needed > EXT4_MAX_TRANS_DATA)
+               needed = EXT4_MAX_TRANS_DATA;
+
+       return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
+}
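+
+/*
+ * Worked example (illustrative, 4K blocks): a 1 GiB file carries about
+ * 2097152 512-byte sectors in i_blocks, so needed starts out as
+ * 2097152 >> 3 = 262144 and is then clamped to EXT4_MAX_TRANS_DATA.
+ * The credit estimate is therefore bounded no matter how large the
+ * file is, which is why the truncate code below must be able to
+ * restart its transaction part-way through.
+ */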
+
+/*
+ * Truncate transactions can be complex and absolutely huge.  So we need to
+ * be able to restart the transaction at a convenient checkpoint to make
+ * sure we don't overflow the journal.
+ *
+ * start_transaction gets us a new handle for a truncate transaction,
+ * and extend_transaction tries to extend the existing one a bit.  If
+ * extend fails, we need to propagate the failure up and restart the
+ * transaction in the top-level truncate loop. --sct
+ */
+static handle_t *start_transaction(struct inode *inode)
+{
+       handle_t *result;
+
+       result = ext4_journal_start(inode, blocks_for_truncate(inode));
+       if (!IS_ERR(result))
+               return result;
+
+       ext4_std_error(inode->i_sb, PTR_ERR(result));
+       return result;
+}
+
+/*
+ * Try to extend this transaction for the purposes of truncation.
+ *
+ * Returns 0 if we managed to create more room.  If we can't create more
+ * room, the transaction must be restarted, and we return 1.
+ */
+static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
+{
+       if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
+               return 0;
+       if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
+               return 0;
+       return 1;
+}
+
+/*
+ * Restart the transaction associated with *handle.  This does a commit,
+ * so before we call here everything must be consistently dirtied against
+ * this transaction.
+ */
+static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
+{
+       jbd_debug(2, "restarting handle %p\n", handle);
+       return ext4_journal_restart(handle, blocks_for_truncate(inode));
+}
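+
+/*
+ * Caller pattern (sketch): the truncate paths below combine these
+ * helpers roughly as
+ *
+ *        handle = start_transaction(inode);
+ *        while (more blocks to free) {
+ *                if (try_to_extend_transaction(handle, inode))
+ *                        ext4_journal_test_restart(handle, inode);
+ *                ... free the next chunk ...
+ *        }
+ *
+ * so that no single transaction grows beyond what the journal can hold.
+ */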
+
+/*
+ * Called at the last iput() if i_nlink is zero.
+ */
+void ext4_delete_inode (struct inode * inode)
+{
+       handle_t *handle;
+
+       truncate_inode_pages(&inode->i_data, 0);
+
+       if (is_bad_inode(inode))
+               goto no_delete;
+
+       handle = start_transaction(inode);
+       if (IS_ERR(handle)) {
+               /*
+                * If we're going to skip the normal cleanup, we still need to
+                * make sure that the in-core orphan linked list is properly
+                * cleaned up.
+                */
+               ext4_orphan_del(NULL, inode);
+               goto no_delete;
+       }
+
+       if (IS_SYNC(inode))
+               handle->h_sync = 1;
+       inode->i_size = 0;
+       if (inode->i_blocks)
+               ext4_truncate(inode);
+       /*
+        * Kill off the orphan record which ext4_truncate created.
+        * AKPM: I think this can be inside the above `if'.
+        * Note that ext4_orphan_del() has to be able to cope with the
+        * deletion of a non-existent orphan - this is because we don't
+        * know if ext4_truncate() actually created an orphan record.
+        * (Well, we could do this if we need to, but heck - it works)
+        */
+       ext4_orphan_del(handle, inode);
+       EXT4_I(inode)->i_dtime  = get_seconds();
+
+       /*
+        * One subtle ordering requirement: if anything has gone wrong
+        * (transaction abort, IO errors, whatever), then we can still
+        * do these next steps (the fs will already have been marked as
+        * having errors), but we can't free the inode if the mark_dirty
+        * fails.
+        */
+       if (ext4_mark_inode_dirty(handle, inode))
+               /* If that failed, just do the required in-core inode clear. */
+               clear_inode(inode);
+       else
+               ext4_free_inode(handle, inode);
+       ext4_journal_stop(handle);
+       return;
+no_delete:
+       clear_inode(inode);     /* We must guarantee clearing of inode... */
+}
+
+typedef struct {
+       __le32  *p;
+       __le32  key;
+       struct buffer_head *bh;
+} Indirect;
+
+static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
+{
+       p->key = *(p->p = v);
+       p->bh = bh;
+}
+
+static int verify_chain(Indirect *from, Indirect *to)
+{
+       while (from <= to && from->key == *from->p)
+               from++;
+       return (from > to);
+}
+
+/**
+ *     ext4_block_to_path - parse the block number into array of offsets
+ *     @inode: inode in question (we are only interested in its superblock)
+ *     @i_block: block number to be parsed
+ *     @offsets: array to store the offsets in
+ *      @boundary: set this non-zero if the referred-to block is likely to be
+ *             followed (on disk) by an indirect block.
+ *
+ *     To store the locations of a file's data, ext4 uses a data structure common
+ *     for UNIX filesystems - tree of pointers anchored in the inode, with
+ *     data blocks at leaves and indirect blocks in intermediate nodes.
+ *     This function translates the block number into path in that tree -
+ *     return value is the path length and @offsets[n] is the offset of
+ *     pointer to the (n+1)th node in the nth one. If @block is out of range
+ *     (negative or too large), a warning is printed and zero is returned.
+ *
+ *     Note: function doesn't find node addresses, so no IO is needed. All
+ *     we need to know is the capacity of indirect blocks (taken from the
+ *     inode->i_sb).
+ */
+
+/*
+ * Portability note: the last comparison (check that we fit into triple
+ * indirect block) is spelled differently, because otherwise on an
+ * architecture with 32-bit longs and 8Kb pages we might get into trouble
+ * if our filesystem had 8Kb blocks. We might use long long, but that would
+ * kill us on x86. Oh, well, at least the sign propagation does not matter -
+ * i_block would have to be negative in the very beginning, so we would not
+ * get there at all.
+ */
+
+static int ext4_block_to_path(struct inode *inode,
+                       long i_block, int offsets[4], int *boundary)
+{
+       int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+       int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
+       const long direct_blocks = EXT4_NDIR_BLOCKS,
+               indirect_blocks = ptrs,
+               double_blocks = (1 << (ptrs_bits * 2));
+       int n = 0;
+       int final = 0;
+
+       if (i_block < 0) {
+               ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
+       } else if (i_block < direct_blocks) {
+               offsets[n++] = i_block;
+               final = direct_blocks;
+       } else if ( (i_block -= direct_blocks) < indirect_blocks) {
+               offsets[n++] = EXT4_IND_BLOCK;
+               offsets[n++] = i_block;
+               final = ptrs;
+       } else if ((i_block -= indirect_blocks) < double_blocks) {
+               offsets[n++] = EXT4_DIND_BLOCK;
+               offsets[n++] = i_block >> ptrs_bits;
+               offsets[n++] = i_block & (ptrs - 1);
+               final = ptrs;
+       } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
+               offsets[n++] = EXT4_TIND_BLOCK;
+               offsets[n++] = i_block >> (ptrs_bits * 2);
+               offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
+               offsets[n++] = i_block & (ptrs - 1);
+               final = ptrs;
+       } else {
+               ext4_warning(inode->i_sb, "ext4_block_to_path", "block > big");
+       }
+       if (boundary)
+               *boundary = final - 1 - (i_block & (ptrs - 1));
+       return n;
+}
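+
+/*
+ * Worked example (illustrative, 4K blocks, so ptrs = 1024): logical
+ * block 11 is a direct block, path = { 11 }; block 600 maps to
+ * { EXT4_IND_BLOCK, 588 }; block 5000 maps to
+ * { EXT4_DIND_BLOCK, 3, 892 }, since 5000 - 12 - 1024 = 3964 and
+ * 3964 = 3 * 1024 + 892.
+ */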
+
+/**
+ *     ext4_get_branch - read the chain of indirect blocks leading to data
+ *     @inode: inode in question
+ *     @depth: depth of the chain (1 - direct pointer, etc.)
+ *     @offsets: offsets of pointers in inode/indirect blocks
+ *     @chain: place to store the result
+ *     @err: here we store the error value
+ *
+ *     Function fills the array of triples <key, p, bh> and returns %NULL
+ *     if everything went OK or the pointer to the last filled triple
+ *     (incomplete one) otherwise. Upon return, chain[i].key contains
+ *     the number of (i+1)-th block in the chain (as it is stored in memory,
+ *     i.e. little-endian 32-bit), chain[i].p contains the address of that
+ *     number (it points into struct inode for i==0 and into the bh->b_data
+ *     for i>0) and chain[i].bh points to the buffer_head of i-th indirect
+ *     block for i>0 and NULL for i==0. In other words, it holds the block
+ *     numbers of the chain, the addresses they were taken from (and where
+ *     we can verify that the chain did not change) and the buffer_heads
+ *     hosting these numbers.
+ *
+ *     The function stops when it stumbles upon a zero pointer (absent block)
+ *             (pointer to last triple returned, *@err == 0)
+ *     or when it gets an IO error reading an indirect block
+ *             (ditto, *@err == -EIO)
+ *     or when it notices that the chain had been changed while it was reading
+ *             (ditto, *@err == -EAGAIN)
+ *     or when it reads all @depth-1 indirect blocks successfully and finds
+ *     the whole chain, all the way to the data (returns %NULL, *err == 0).
+ */
+static Indirect *ext4_get_branch(struct inode *inode, int depth, int *offsets,
+                                Indirect chain[4], int *err)
+{
+       struct super_block *sb = inode->i_sb;
+       Indirect *p = chain;
+       struct buffer_head *bh;
+
+       *err = 0;
+       /* i_data is not going away, no lock needed */
+       add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
+       if (!p->key)
+               goto no_block;
+       while (--depth) {
+               bh = sb_bread(sb, le32_to_cpu(p->key));
+               if (!bh)
+                       goto failure;
+               /* Reader: pointers */
+               if (!verify_chain(chain, p))
+                       goto changed;
+               add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
+               /* Reader: end */
+               if (!p->key)
+                       goto no_block;
+       }
+       return NULL;
+
+changed:
+       brelse(bh);
+       *err = -EAGAIN;
+       goto no_block;
+failure:
+       *err = -EIO;
+no_block:
+       return p;
+}
+
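+/*
+ * Illustration (informal sketch, not part of the original comment):
+ * for a depth-2 lookup through the singly indirect block, the filled
+ * chain looks like
+ *
+ *     chain[0] = { key, p = &EXT4_I(inode)->i_data[EXT4_IND_BLOCK],
+ *                  bh = NULL }
+ *     chain[1] = { key, p = (__le32 *)bh->b_data + offsets[1], bh }
+ *
+ * i.e. only the entries with i > 0 carry a buffer_head, exactly as
+ * described above.
+ */
+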
+/**
+ *     ext4_find_near - find a place for allocation with sufficient locality
+ *     @inode: owner
+ *     @ind: descriptor of indirect block.
+ *
+ *     This function returns the preferred place for block allocation.
+ *     It is used when heuristic for sequential allocation fails.
+ *     Rules are:
+ *       + if there is a block to the left of our position - allocate near it.
+ *       + if pointer will live in indirect block - allocate near that block.
+ *       + if pointer will live in inode - allocate in the same
+ *         cylinder group.
+ *
+ *     In the latter case we colour the starting block by the caller's PID to
+ *     prevent it from clashing with concurrent allocations for a different
+ *     inode in the same block group.  The PID is used here so that
+ *     functionally related files will be close-by on-disk.
+ *
+ *     Caller must make sure that @ind is valid and will stay that way.
+ */
+static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
+       __le32 *p;
+       ext4_fsblk_t bg_start;
+       ext4_grpblk_t colour;
+
+       /* Try to find previous block */
+       for (p = ind->p - 1; p >= start; p--) {
+               if (*p)
+                       return le32_to_cpu(*p);
+       }
+
+       /* No such thing, so let's try the location of the indirect block */
+       if (ind->bh)
+               return ind->bh->b_blocknr;
+
+       /*
+        * Is it going to be referred to from the inode itself? OK, just put it
+        * into the same cylinder group then.
+        */
+       bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
+       colour = (current->pid % 16) *
+                       (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
+       return bg_start + colour;
+}
+
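+/*
+ * Hypothetical numbers for the colouring above (illustrative, not from
+ * the original): with EXT4_BLOCKS_PER_GROUP() == 32768 and
+ * current->pid == 1234, colour = (1234 % 16) * (32768 / 16) =
+ * 2 * 2048 = 4096, giving a goal of bg_start + 4096.  Sixteen
+ * concurrently allocating processes thus get sixteen disjoint starting
+ * offsets within the group.
+ */
+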
+/**
+ *     ext4_find_goal - find a preferred place for allocation.
+ *     @inode: owner
+ *     @block:  block we want
+ *     @chain:  chain of indirect blocks
+ *     @partial: pointer to the last triple within a chain
+ *
+ *     This function finds the preferred place for block allocation
+ *     and returns it.
+ */
+
+static ext4_fsblk_t ext4_find_goal(struct inode *inode, long block,
+               Indirect chain[4], Indirect *partial)
+{
+       struct ext4_block_alloc_info *block_i;
+
+       block_i =  EXT4_I(inode)->i_block_alloc_info;
+
+       /*
+        * try the heuristic for sequential allocation,
+        * failing that at least try to get decent locality.
+        */
+       if (block_i && (block == block_i->last_alloc_logical_block + 1)
+               && (block_i->last_alloc_physical_block != 0)) {
+               return block_i->last_alloc_physical_block + 1;
+       }
+
+       return ext4_find_near(inode, partial);
+}
+
+/**
+ *     ext4_blks_to_allocate: Look up the block map and count the number
+ *     of direct blocks that need to be allocated for the given branch.
+ *
+ *     @branch: chain of indirect blocks
+ *     @k: number of blocks needed for indirect blocks
+ *     @blks: number of data blocks to be mapped.
+ *     @blocks_to_boundary:  the offset in the indirect block
+ *
+ *     return the total number of blocks to be allocated, including the
+ *     direct and indirect blocks.
+ */
+static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
+               int blocks_to_boundary)
+{
+       unsigned long count = 0;
+
+       /*
+        * Simple case: the [td]indirect block(s) have not been allocated
+        * yet, so clearly no blocks on that path have been allocated either.
+        */
+       if (k > 0) {
+               /* right now we don't handle cross boundary allocation */
+               if (blks < blocks_to_boundary + 1)
+                       count += blks;
+               else
+                       count += blocks_to_boundary + 1;
+               return count;
+       }
+
+       count++;
+       while (count < blks && count <= blocks_to_boundary &&
+               le32_to_cpu(*(branch[0].p + count)) == 0) {
+               count++;
+       }
+       return count;
+}
+
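+/*
+ * Example with assumed values (illustrative, not from the original):
+ * for k = 1 (an indirect block is still missing), blks = 8 and
+ * blocks_to_boundary = 3, the function returns 4 - the allocation is
+ * clamped at the boundary because cross-boundary allocation is not
+ * handled.  For k = 0 it instead counts contiguous zero slots after
+ * branch[0].p, stopping at blks or at the boundary.
+ */
+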
+/**
+ *     ext4_alloc_blocks: allocate the multiple blocks needed for a branch
+ *     @indirect_blks: the number of blocks needed to allocate for the
+ *                     indirect blocks
+ *
+ *     @new_blocks: on return it will store the new block numbers for
+ *     the indirect blocks (if needed) and the first direct block,
+ *     @blks:  on return it will store the total number of allocated
+ *             direct blocks
+ */
+static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t goal, int indirect_blks, int blks,
+                       ext4_fsblk_t new_blocks[4], int *err)
+{
+       int target, i;
+       unsigned long count = 0;
+       int index = 0;
+       ext4_fsblk_t current_block = 0;
+       int ret = 0;
+
+       /*
+        * Here we try to allocate the requested multiple blocks at once,
+        * on a best-effort basis.
+        * To build a branch, we should allocate blocks for
+        * the indirect blocks (if not allocated yet), and at least
+        * the first direct block of this branch.  That's the
+        * minimum number of blocks that need to be allocated (required).
+        */
+       target = blks + indirect_blks;
+
+       while (1) {
+               count = target;
+               /* allocating blocks for indirect blocks and direct blocks */
+               current_block = ext4_new_blocks(handle, inode, goal,
+                                               &count, err);
+               if (*err)
+                       goto failed_out;
+
+               target -= count;
+               /* allocate blocks for indirect blocks */
+               while (index < indirect_blks && count) {
+                       new_blocks[index++] = current_block++;
+                       count--;
+               }
+
+               if (count > 0)
+                       break;
+       }
+
+       /* save the new block number for the first direct block */
+       new_blocks[index] = current_block;
+
+       /* total number of blocks allocated for direct blocks */
+       ret = count;
+       *err = 0;
+       return ret;
+failed_out:
+       for (i = 0; i < index; i++)
+               ext4_free_blocks(handle, inode, new_blocks[i], 1);
+       return ret;
+}
+
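+/*
+ * Sketch of the loop above with made-up numbers (illustrative): for
+ * indirect_blks = 2 and blks = 4, target = 6.  If ext4_new_blocks()
+ * returns a run of 3 blocks, two become new_blocks[0..1] (the indirect
+ * blocks), count drops to 1 > 0 and the loop breaks; new_blocks[2]
+ * then records the first direct block and the function returns 1,
+ * i.e. one direct block allocated.
+ */
+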
+/**
+ *     ext4_alloc_branch - allocate and set up a chain of blocks.
+ *     @inode: owner
+ *     @indirect_blks: number of allocated indirect blocks
+ *     @blks: number of allocated direct blocks
+ *     @offsets: offsets (in the blocks) to store the pointers to next.
+ *     @branch: place to store the chain in.
+ *
+ *     This function allocates blocks, zeroes out all but the last one,
+ *     links them into a chain and (if we are synchronous) writes them to
+ *     disk. In other words, it prepares a branch that can be spliced onto
+ *     the inode. It stores the information about that chain in branch[], in
+ *     the same format as ext4_get_branch() would do. We are calling it after
+ *     we had read the existing part of the chain and partial points to the
+ *     last triple of that (the one with a zero ->key). Upon exit we have the
+ *     same picture as after a successful ext4_get_block(), except that in
+ *     one place the chain is disconnected - *branch->p is still zero (we did
+ *     not set the last link), but branch->key contains the number that
+ *     should be placed into *branch->p to fill that gap.
+ *
+ *     If allocation fails we free all blocks we've allocated (and forget
+ *     their buffer_heads) and return the error value from the failed
+ *     ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
+ *     as described above and return 0.
+ */
+static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+                       int indirect_blks, int *blks, ext4_fsblk_t goal,
+                       int *offsets, Indirect *branch)
+{
+       int blocksize = inode->i_sb->s_blocksize;
+       int i, n = 0;
+       int err = 0;
+       struct buffer_head *bh;
+       int num;
+       ext4_fsblk_t new_blocks[4];
+       ext4_fsblk_t current_block;
+
+       num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
+                               *blks, new_blocks, &err);
+       if (err)
+               return err;
+
+       branch[0].key = cpu_to_le32(new_blocks[0]);
+       /*
+        * metadata blocks and data blocks are allocated.
+        */
+       for (n = 1; n <= indirect_blks;  n++) {
+               /*
+                * Get buffer_head for parent block, zero it out
+                * and set the pointer to new one, then send
+                * parent to disk.
+                */
+               bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+               branch[n].bh = bh;
+               lock_buffer(bh);
+               BUFFER_TRACE(bh, "call get_create_access");
+               err = ext4_journal_get_create_access(handle, bh);
+               if (err) {
+                       unlock_buffer(bh);
+                       brelse(bh);
+                       goto failed;
+               }
+
+               memset(bh->b_data, 0, blocksize);
+               branch[n].p = (__le32 *) bh->b_data + offsets[n];
+               branch[n].key = cpu_to_le32(new_blocks[n]);
+               *branch[n].p = branch[n].key;
+               if (n == indirect_blks) {
+                       current_block = new_blocks[n];
+                       /*
+                        * End of chain: update the last new metablock of
+                        * the chain to point to the newly allocated
+                        * data block numbers
+                        */
+                       for (i = 1; i < num; i++)
+                               *(branch[n].p + i) = cpu_to_le32(++current_block);
+               }
+               BUFFER_TRACE(bh, "marking uptodate");
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+
+               BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
+               err = ext4_journal_dirty_metadata(handle, bh);
+               if (err)
+                       goto failed;
+       }
+       *blks = num;
+       return err;
+failed:
+       /* Allocation failed, free what we already allocated */
+       for (i = 1; i <= n ; i++) {
+               BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
+               ext4_journal_forget(handle, branch[i].bh);
+       }
+       for (i = 0; i < indirect_blks; i++)
+               ext4_free_blocks(handle, inode, new_blocks[i], 1);
+
+       ext4_free_blocks(handle, inode, new_blocks[i], num);
+
+       return err;
+}
+
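+/*
+ * Resulting layout (illustrative, for assumed indirect_blks = 2 and
+ * num = 3): branch[0].key names the first new indirect block;
+ * branch[n].bh (n = 1, 2) wraps the block numbered new_blocks[n-1],
+ * zeroed except for the slot at offsets[n], which holds branch[n].key;
+ * the deepest block additionally receives the remaining num-1
+ * consecutive data block numbers.
+ */
+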
+/**
+ * ext4_splice_branch - splice the allocated branch onto inode.
+ * @inode: owner
+ * @block: (logical) number of block we are adding
+ * @chain: chain of indirect blocks (with a missing link - see
+ *     ext4_alloc_branch)
+ * @where: location of missing link
+ * @num:   number of indirect blocks we are adding
+ * @blks:  number of direct blocks we are adding
+ *
+ * This function fills the missing link and does all housekeeping needed in
+ * inode (->i_blocks, etc.). In case of success we end up with the full
+ * chain to new block and return 0.
+ */
+static int ext4_splice_branch(handle_t *handle, struct inode *inode,
+                       long block, Indirect *where, int num, int blks)
+{
+       int i;
+       int err = 0;
+       struct ext4_block_alloc_info *block_i;
+       ext4_fsblk_t current_block;
+
+       block_i = EXT4_I(inode)->i_block_alloc_info;
+       /*
+        * If we're splicing into a [td]indirect block (as opposed to the
+        * inode) then we need to get write access to the [td]indirect block
+        * before the splice.
+        */
+       if (where->bh) {
+               BUFFER_TRACE(where->bh, "get_write_access");
+               err = ext4_journal_get_write_access(handle, where->bh);
+               if (err)
+                       goto err_out;
+       }
+       /* That's it */
+
+       *where->p = where->key;
+
+       /*
+        * Update the host buffer_head or inode to point to the just-allocated
+        * direct blocks
+        */
+       if (num == 0 && blks > 1) {
+               current_block = le32_to_cpu(where->key) + 1;
+               for (i = 1; i < blks; i++)
+                       *(where->p + i) = cpu_to_le32(current_block++);
+       }
+
+       /*
+        * Update the most recently allocated logical & physical block
+        * in i_block_alloc_info, to help find the proper goal block for the
+        * next allocation
+        */
+       if (block_i) {
+               block_i->last_alloc_logical_block = block + blks - 1;
+               block_i->last_alloc_physical_block =
+                               le32_to_cpu(where[num].key) + blks - 1;
+       }
+
+       /* We are done with atomic stuff, now do the rest of housekeeping */
+
+       inode->i_ctime = CURRENT_TIME_SEC;
+       ext4_mark_inode_dirty(handle, inode);
+
+       /* Had we spliced it onto an indirect block? */
+       if (where->bh) {
+               /*
+                * If we spliced it onto an indirect block, we haven't
+                * altered the inode.  Note however that if it is being spliced
+                * onto an indirect block at the very end of the file (the
+                * file is growing) then we *will* alter the inode to reflect
+                * the new i_size.  But that is not done here - it is done in
+                * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
+                */
+               jbd_debug(5, "splicing indirect only\n");
+               BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
+               err = ext4_journal_dirty_metadata(handle, where->bh);
+               if (err)
+                       goto err_out;
+       } else {
+               /*
+                * OK, we spliced it into the inode itself on a direct block.
+                * Inode was dirtied above.
+                */
+               jbd_debug(5, "splicing direct\n");
+       }
+       return err;
+
+err_out:
+       for (i = 1; i <= num; i++) {
+               BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
+               ext4_journal_forget(handle, where[i].bh);
+               ext4_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
+       }
+       ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
+       return err;
+}
+
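+/*
+ * Informal note on the splice above: before it runs, *where->p == 0
+ * and where->key holds the block number prepared by
+ * ext4_alloc_branch(); the single store "*where->p = where->key" is
+ * what makes the whole new branch reachable, which is why it must come
+ * only after every block in the branch has been initialized and
+ * journalled.
+ */
+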
+/*
+ * Allocation strategy is simple: if we have to allocate something, we will
+ * have to go the whole way to leaf. So let's do it before attaching anything
+ * to tree, set linkage between the newborn blocks, write them if sync is
+ * required, recheck the path, free and repeat if check fails, otherwise
+ * set the last missing link (that will protect us from any truncate-generated
+ * removals - all blocks on the path are immune now) and possibly force the
+ * write on the parent block.
+ * That has a nice additional property: no special recovery from the failed
+ * allocations is needed - we simply release blocks and do not touch anything
+ * reachable from inode.
+ *
+ * `handle' can be NULL if create == 0.
+ *
+ * The BKL may not be held on entry here.  Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
+ */
+int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
+               sector_t iblock, unsigned long maxblocks,
+               struct buffer_head *bh_result,
+               int create, int extend_disksize)
+{
+       int err = -EIO;
+       int offsets[4];
+       Indirect chain[4];
+       Indirect *partial;
+       ext4_fsblk_t goal;
+       int indirect_blks;
+       int blocks_to_boundary = 0;
+       int depth;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       int count = 0;
+       ext4_fsblk_t first_block = 0;
+
+
+       J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
+       J_ASSERT(handle != NULL || create == 0);
+       depth = ext4_block_to_path(inode, iblock, offsets,
+                                       &blocks_to_boundary);
+
+       if (depth == 0)
+               goto out;
+
+       partial = ext4_get_branch(inode, depth, offsets, chain, &err);
+
+       /* Simplest case - block found, no allocation needed */
+       if (!partial) {
+               first_block = le32_to_cpu(chain[depth - 1].key);
+               clear_buffer_new(bh_result);
+               count++;
+               /* map more blocks */
+               while (count < maxblocks && count <= blocks_to_boundary) {
+                       ext4_fsblk_t blk;
+
+                       if (!verify_chain(chain, partial)) {
+                               /*
+                                * Indirect block might be removed by
+                                * truncate while we were reading it.
+                                * Handling of that case: forget what we've
+                                * got now. Flag the err as EAGAIN, so it
+                                * will reread.
+                                */
+                               err = -EAGAIN;
+                               count = 0;
+                               break;
+                       }
+                       blk = le32_to_cpu(*(chain[depth-1].p + count));
+
+                       if (blk == first_block + count)
+                               count++;
+                       else
+                               break;
+               }
+               if (err != -EAGAIN)
+                       goto got_it;
+       }
+
+       /* Next simple case - plain lookup or failed read of indirect block */
+       if (!create || err == -EIO)
+               goto cleanup;
+
+       mutex_lock(&ei->truncate_mutex);
+
+       /*
+        * If the indirect block is missing while we are reading
+        * the chain (ext4_get_branch() returns the -EAGAIN err), or
+        * if the chain has been changed after we grabbed the mutex
+        * (either because another process truncated this branch, or
+        * another get_block allocated this branch), re-grab the chain to
+        * see if the requested block has been allocated or not.
+        *
+        * Since we already block out truncate and other get_block callers
+        * at this point, we will have the current copy of the chain when we
+        * splice the branch into the tree.
+        */
+       if (err == -EAGAIN || !verify_chain(chain, partial)) {
+               while (partial > chain) {
+                       brelse(partial->bh);
+                       partial--;
+               }
+               partial = ext4_get_branch(inode, depth, offsets, chain, &err);
+               if (!partial) {
+                       count++;
+                       mutex_unlock(&ei->truncate_mutex);
+                       if (err)
+                               goto cleanup;
+                       clear_buffer_new(bh_result);
+                       goto got_it;
+               }
+       }
+
+       /*
+        * Okay, we need to do block allocation.  Lazily initialize the block
+        * allocation info here if necessary
+        */
+       if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
+               ext4_init_block_alloc_info(inode);
+
+       goal = ext4_find_goal(inode, iblock, chain, partial);
+
+       /* the number of blocks need to allocate for [d,t]indirect blocks */
+       indirect_blks = (chain + depth) - partial - 1;
+
+       /*
+        * Next look up the indirect map to count the total number of
+        * direct blocks to allocate for this branch.
+        */
+       count = ext4_blks_to_allocate(partial, indirect_blks,
+                                       maxblocks, blocks_to_boundary);
+       /*
+        * Block out ext4_truncate while we alter the tree
+        */
+       err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
+                               offsets + (partial - chain), partial);
+
+       /*
+        * The ext4_splice_branch call will free and forget any buffers
+        * on the new chain if there is a failure, but that risks using
+        * up transaction credits, especially for bitmaps where the
+        * credits cannot be returned.  Can we handle this somehow?  We
+        * may need to return -EAGAIN upwards in the worst case.  --sct
+        */
+       if (!err)
+               err = ext4_splice_branch(handle, inode, iblock,
+                                       partial, indirect_blks, count);
+       /*
+        * i_disksize growing is protected by truncate_mutex.  Don't forget to
+        * protect it if you're about to implement concurrent
+        * ext4_get_block() -bzzz
+        */
+       if (!err && extend_disksize && inode->i_size > ei->i_disksize)
+               ei->i_disksize = inode->i_size;
+       mutex_unlock(&ei->truncate_mutex);
+       if (err)
+               goto cleanup;
+
+       set_buffer_new(bh_result);
+got_it:
+       map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
+       if (count > blocks_to_boundary)
+               set_buffer_boundary(bh_result);
+       err = count;
+       /* Clean up and exit */
+       partial = chain + depth - 1;    /* the whole chain */
+cleanup:
+       while (partial > chain) {
+               BUFFER_TRACE(partial->bh, "call brelse");
+               brelse(partial->bh);
+               partial--;
+       }
+       BUFFER_TRACE(bh_result, "returned");
+out:
+       return err;
+}
+
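+/*
+ * Return-value recap with assumed numbers (illustrative): a lookup of
+ * maxblocks = 8 that finds 3 contiguous mapped blocks returns 3, with
+ * bh_result mapped to the first of them; a hole with create == 0
+ * returns 0; a failed read of an indirect block returns -EIO.
+ */
+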
+#define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32)
+
+static int ext4_get_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       handle_t *handle = journal_current_handle();
+       int ret = 0;
+       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+
+       if (!create)
+               goto get_block;         /* A read */
+
+       if (max_blocks == 1)
+               goto get_block;         /* A single block get */
+
+       if (handle->h_transaction->t_state == T_LOCKED) {
+               /*
+                * Huge direct-io writes can hold off commits for long
+                * periods of time.  Let this commit run.
+                */
+               ext4_journal_stop(handle);
+               handle = ext4_journal_start(inode, DIO_CREDITS);
+               if (IS_ERR(handle))
+                       ret = PTR_ERR(handle);
+               goto get_block;
+       }
+
+       if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) {
+               /*
+                * Getting low on buffer credits...
+                */
+               ret = ext4_journal_extend(handle, DIO_CREDITS);
+               if (ret > 0) {
+                       /*
+                        * Couldn't extend the transaction.  Start a new one.
+                        */
+                       ret = ext4_journal_restart(handle, DIO_CREDITS);
+               }
+       }
+
+get_block:
+       if (ret == 0) {
+               ret = ext4_get_blocks_wrap(handle, inode, iblock,
+                                       max_blocks, bh_result, create, 0);
+               if (ret > 0) {
+                       bh_result->b_size = (ret << inode->i_blkbits);
+                       ret = 0;
+               }
+       }
+       return ret;
+}
+
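+/*
+ * Informal note on the credit handling above: DIO_CREDITS bounds the
+ * buffer credits a direct-io write may consume per transaction.  Once
+ * h_buffer_credits falls to EXT4_RESERVE_TRANS_BLOCKS or below, we
+ * first try to extend the handle and, failing that, restart it, so a
+ * long write never pins a single transaction open indefinitely.
+ */
+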
+/*
+ * `handle' can be NULL if create is zero
+ */
+struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
+                               long block, int create, int *errp)
+{
+       struct buffer_head dummy;
+       int fatal = 0, err;
+
+       J_ASSERT(handle != NULL || create == 0);
+
+       dummy.b_state = 0;
+       dummy.b_blocknr = -1000;
+       buffer_trace_init(&dummy.b_history);
+       err = ext4_get_blocks_wrap(handle, inode, block, 1,
+                                       &dummy, create, 1);
+       /*
+        * ext4_get_blocks_handle() returns the number of blocks
+        * mapped - 0 in the case of a hole.
+        */
+       if (err > 0) {
+               if (err > 1)
+                       WARN_ON(1);
+               err = 0;
+       }
+       *errp = err;
+       if (!err && buffer_mapped(&dummy)) {
+               struct buffer_head *bh;
+               bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
+               if (!bh) {
+                       *errp = -EIO;
+                       goto err;
+               }
+               if (buffer_new(&dummy)) {
+                       J_ASSERT(create != 0);
+                       J_ASSERT(handle != 0);
+
+                       /*
+                        * Now that we do not always journal data, we should
+                        * keep in mind whether this should always journal the
+                        * new buffer as metadata.  For now, regular file
+                        * writes use ext4_get_block instead, so it's not a
+                        * problem.
+                        */
+                       lock_buffer(bh);
+                       BUFFER_TRACE(bh, "call get_create_access");
+                       fatal = ext4_journal_get_create_access(handle, bh);
+                       if (!fatal && !buffer_uptodate(bh)) {
+                               memset(bh->b_data, 0,
+                                       inode->i_sb->s_blocksize);
+                               set_buffer_uptodate(bh);
+                       }
+                       unlock_buffer(bh);
+                       BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
+                       err = ext4_journal_dirty_metadata(handle, bh);
+                       if (!fatal)
+                               fatal = err;
+               } else {
+                       BUFFER_TRACE(bh, "not a new buffer");
+               }
+               if (fatal) {
+                       *errp = fatal;
+                       brelse(bh);
+                       bh = NULL;
+               }
+               return bh;
+       }
+err:
+       return NULL;
+}
+
+struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
+                              int block, int create, int *err)
+{
+       struct buffer_head * bh;
+
+       bh = ext4_getblk(handle, inode, block, create, err);
+       if (!bh)
+               return bh;
+       if (buffer_uptodate(bh))
+               return bh;
+       ll_rw_block(READ_META, 1, &bh);
+       wait_on_buffer(bh);
+       if (buffer_uptodate(bh))
+               return bh;
+       put_bh(bh);
+       *err = -EIO;
+       return NULL;
+}
+
+static int walk_page_buffers(  handle_t *handle,
+                               struct buffer_head *head,
+                               unsigned from,
+                               unsigned to,
+                               int *partial,
+                               int (*fn)(      handle_t *handle,
+                                               struct buffer_head *bh))
+{
+       struct buffer_head *bh;
+       unsigned block_start, block_end;
+       unsigned blocksize = head->b_size;
+       int err, ret = 0;
+       struct buffer_head *next;
+
+       for (   bh = head, block_start = 0;
+               ret == 0 && (bh != head || !block_start);
+               block_start = block_end, bh = next)
+       {
+               next = bh->b_this_page;
+               block_end = block_start + blocksize;
+               if (block_end <= from || block_start >= to) {
+                       if (partial && !buffer_uptodate(bh))
+                               *partial = 1;
+                       continue;
+               }
+               err = (*fn)(handle, bh);
+               if (!ret)
+                       ret = err;
+       }
+       return ret;
+}
+
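+/*
+ * Typical use (illustrative):
+ *
+ *     walk_page_buffers(handle, page_buffers(page), from, to,
+ *                       NULL, do_journal_get_write_access);
+ *
+ * applies the callback to every buffer overlapping [from, to) and
+ * returns the first error; *partial (when non-NULL) reports whether
+ * any buffer outside that range is not uptodate.
+ */
+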
+/*
+ * To preserve ordering, it is essential that the hole instantiation and
+ * the data write be encapsulated in a single transaction.  We cannot
+ * close off a transaction and start a new one between the ext4_get_block()
+ * and the commit_write().  So doing the jbd2_journal_start at the start of
+ * prepare_write() is the right place.
+ *
+ * Also, this function can nest inside ext4_writepage() ->
+ * block_write_full_page(). In that case, we *know* that ext4_writepage()
+ * has generated enough buffer credits to do the whole page.  So we won't
+ * block on the journal in that case, which is good, because the caller may
+ * be PF_MEMALLOC.
+ *
+ * By accident, ext4 can be reentered when a transaction is open via
+ * quota file writes.  If we were to commit the transaction while thus
+ * reentered, there can be a deadlock - we would be holding a quota
+ * lock, and the commit would never complete if another thread had a
+ * transaction open and was blocking on the quota lock - a ranking
+ * violation.
+ *
+ * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
+ * will _not_ run commit under these circumstances because handle->h_ref
+ * is elevated.  We'll still have enough credits for the tiny quotafile
+ * write.
+ */
+static int do_journal_get_write_access(handle_t *handle,
+                                       struct buffer_head *bh)
+{
+       if (!buffer_mapped(bh) || buffer_freed(bh))
+               return 0;
+       return ext4_journal_get_write_access(handle, bh);
+}
+
+static int ext4_prepare_write(struct file *file, struct page *page,
+                             unsigned from, unsigned to)
+{
+       struct inode *inode = page->mapping->host;
+       int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
+       handle_t *handle;
+       int retries = 0;
+
+retry:
+       handle = ext4_journal_start(inode, needed_blocks);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
+       }
+       if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
+               ret = nobh_prepare_write(page, from, to, ext4_get_block);
+       else
+               ret = block_prepare_write(page, from, to, ext4_get_block);
+       if (ret)
+               goto prepare_write_failed;
+
+       if (ext4_should_journal_data(inode)) {
+               ret = walk_page_buffers(handle, page_buffers(page),
+                               from, to, NULL, do_journal_get_write_access);
+       }
+prepare_write_failed:
+       if (ret)
+               ext4_journal_stop(handle);
+       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
+out:
+       return ret;
+}
+
+int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+{
+       int err = jbd2_journal_dirty_data(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
+                                               bh, handle, err);
+       return err;
+}
+
+/* For commit_write() in data=journal mode */
+static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
+{
+       if (!buffer_mapped(bh) || buffer_freed(bh))
+               return 0;
+       set_buffer_uptodate(bh);
+       return ext4_journal_dirty_metadata(handle, bh);
+}
+
+/*
+ * We need to pick up the new inode size which generic_commit_write gave us.
+ * `file' can be NULL - eg, when called from page_symlink().
+ *
+ * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
+ * buffers are managed internally.
+ */
+static int ext4_ordered_commit_write(struct file *file, struct page *page,
+                            unsigned from, unsigned to)
+{
+       handle_t *handle = ext4_journal_current_handle();
+       struct inode *inode = page->mapping->host;
+       int ret = 0, ret2;
+
+       ret = walk_page_buffers(handle, page_buffers(page),
+               from, to, NULL, ext4_journal_dirty_data);
+
+       if (ret == 0) {
+               /*
+                * generic_commit_write() will run mark_inode_dirty() if i_size
+                * changes.  So let's piggyback the i_disksize mark_inode_dirty
+                * into that.
+                */
+               loff_t new_i_size;
+
+               new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+               if (new_i_size > EXT4_I(inode)->i_disksize)
+                       EXT4_I(inode)->i_disksize = new_i_size;
+               ret = generic_commit_write(file, page, from, to);
+       }
+       ret2 = ext4_journal_stop(handle);
+       if (!ret)
+               ret = ret2;
+       return ret;
+}
+
+static int ext4_writeback_commit_write(struct file *file, struct page *page,
+                            unsigned from, unsigned to)
+{
+       handle_t *handle = ext4_journal_current_handle();
+       struct inode *inode = page->mapping->host;
+       int ret = 0, ret2;
+       loff_t new_i_size;
+
+       new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+       if (new_i_size > EXT4_I(inode)->i_disksize)
+               EXT4_I(inode)->i_disksize = new_i_size;
+
+       if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
+               ret = nobh_commit_write(file, page, from, to);
+       else
+               ret = generic_commit_write(file, page, from, to);
+
+       ret2 = ext4_journal_stop(handle);
+       if (!ret)
+               ret = ret2;
+       return ret;
+}
+
+static int ext4_journalled_commit_write(struct file *file,
+                       struct page *page, unsigned from, unsigned to)
+{
+       handle_t *handle = ext4_journal_current_handle();
+       struct inode *inode = page->mapping->host;
+       int ret = 0, ret2;
+       int partial = 0;
+       loff_t pos;
+
+       /*
+        * Here we duplicate the generic_commit_write() functionality
+        */
+       pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+
+       ret = walk_page_buffers(handle, page_buffers(page), from,
+                               to, &partial, commit_write_fn);
+       if (!partial)
+               SetPageUptodate(page);
+       if (pos > inode->i_size)
+               i_size_write(inode, pos);
+       EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+       if (inode->i_size > EXT4_I(inode)->i_disksize) {
+               EXT4_I(inode)->i_disksize = inode->i_size;
+               ret2 = ext4_mark_inode_dirty(handle, inode);
+               if (!ret)
+                       ret = ret2;
+       }
+       ret2 = ext4_journal_stop(handle);
+       if (!ret)
+               ret = ret2;
+       return ret;
+}
+
+/*
+ * bmap() is special.  It gets used by applications such as lilo and by
+ * the swapper to find the on-disk block of a specific piece of data.
+ *
+ * Naturally, this is dangerous if the block concerned is still in the
+ * journal.  If somebody makes a swapfile on an ext4 data-journaling
+ * filesystem and enables swap, then they may get a nasty shock when the
+ * data getting swapped to that swapfile suddenly gets overwritten by
+ * the original zeros written out previously to the journal and
+ * awaiting writeback in the kernel's buffer cache.
+ *
+ * So, if we see any bmap calls here on a modified, data-journaled file,
+ * take extra steps to flush any blocks which might be in the cache.
+ */
+static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
+{
+       struct inode *inode = mapping->host;
+       journal_t *journal;
+       int err;
+
+       if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
+               /*
+                * This is a REALLY heavyweight approach, but the use of
+                * bmap on dirty files is expected to be extremely rare:
+                * only if we run lilo or swapon on a freshly made file
+                * do we expect this to happen.
+                *
+                * (bmap requires CAP_SYS_RAWIO so this does not
+                * represent an unprivileged user DOS attack --- we'd be
+                * in trouble if mortal users could trigger this path at
+                * will.)
+                *
+                * NB. EXT4_STATE_JDATA is not set on files other than
+                * regular files.  If somebody wants to bmap a directory
+                * or symlink and gets confused because the buffer
+                * hasn't yet been flushed to disk, they deserve
+                * everything they get.
+                */
+
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
+               journal = EXT4_JOURNAL(inode);
+               jbd2_journal_lock_updates(journal);
+               err = jbd2_journal_flush(journal);
+               jbd2_journal_unlock_updates(journal);
+
+               if (err)
+                       return 0;
+       }
+
+       return generic_block_bmap(mapping, block, ext4_get_block);
+}
+
+static int bget_one(handle_t *handle, struct buffer_head *bh)
+{
+       get_bh(bh);
+       return 0;
+}
+
+static int bput_one(handle_t *handle, struct buffer_head *bh)
+{
+       put_bh(bh);
+       return 0;
+}
+
+static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
+{
+       if (buffer_mapped(bh))
+               return ext4_journal_dirty_data(handle, bh);
+       return 0;
+}
+
+/*
+ * Note that we always start a transaction even if we're not journalling
+ * data.  This is to preserve ordering: any hole instantiation within
+ * __block_write_full_page -> ext4_get_block() should be journalled
+ * along with the data so we don't crash and then get metadata which
+ * refers to old data.
+ *
+ * In all journalling modes block_write_full_page() will start the I/O.
+ *
+ * Problem:
+ *
+ *     ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
+ *             ext4_writepage()
+ *
+ * Similar for:
+ *
+ *     ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
+ *
+ * Same applies to ext4_get_block().  We will deadlock on various things like
+ * lock_journal and i_truncate_mutex.
+ *
+ * Setting PF_MEMALLOC here doesn't work - too many internal memory
+ * allocations fail.
+ *
+ * 16May01: If we're reentered then journal_current_handle() will be
+ *         non-zero. We simply *return*.
+ *
+ * 1 July 2001: @@@ FIXME:
+ *   In journalled data mode, a data buffer may be metadata against the
+ *   current transaction.  But the same file is part of a shared mapping
+ *   and someone does a writepage() on it.
+ *
+ *   We will move the buffer onto the async_data list, but *after* it has
+ *   been dirtied. So there's a small window where we have dirty data on
+ *   BJ_Metadata.
+ *
+ *   Note that this only applies to the last partial page in the file.  The
+ *   bit which block_write_full_page() uses prepare/commit for.  (That's
+ *   broken code anyway: it's wrong for msync()).
+ *
+ *   It's a rare case: it affects the final partial page, for journalled data
+ *   where the file is subject to both write() and writepage() in the same
+ *   transaction.  To fix it we'll need a custom block_write_full_page().
+ *   We'll probably need that anyway for journalling writepage() output.
+ *
+ * We don't honour synchronous mounts for writepage().  That would be
+ * disastrous.  Any write() or metadata operation will sync the fs for
+ * us.
+ *
+ * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
+ * we don't need to open a transaction here.
+ */
+static int ext4_ordered_writepage(struct page *page,
+                               struct writeback_control *wbc)
+{
+       struct inode *inode = page->mapping->host;
+       struct buffer_head *page_bufs;
+       handle_t *handle = NULL;
+       int ret = 0;
+       int err;
+
+       J_ASSERT(PageLocked(page));
+
+       /*
+        * We give up here if we're reentered, because it might be for a
+        * different filesystem.
+        */
+       if (ext4_journal_current_handle())
+               goto out_fail;
+
+       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out_fail;
+       }
+
+       if (!page_has_buffers(page)) {
+               create_empty_buffers(page, inode->i_sb->s_blocksize,
+                               (1 << BH_Dirty)|(1 << BH_Uptodate));
+       }
+       page_bufs = page_buffers(page);
+       walk_page_buffers(handle, page_bufs, 0,
+                       PAGE_CACHE_SIZE, NULL, bget_one);
+
+       ret = block_write_full_page(page, ext4_get_block, wbc);
+
+       /*
+        * The page can become unlocked at any point now, and
+        * truncate can then come in and change things.  So we
+        * can't touch *page from now on.  But *page_bufs is
+        * safe due to elevated refcount.
+        */
+
+       /*
+        * And attach them to the current transaction.  But only if
+        * block_write_full_page() succeeded.  Otherwise they are unmapped,
+        * and generally junk.
+        */
+       if (ret == 0) {
+               err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
+                                       NULL, jbd2_journal_dirty_data_fn);
+               if (!ret)
+                       ret = err;
+       }
+       walk_page_buffers(handle, page_bufs, 0,
+                       PAGE_CACHE_SIZE, NULL, bput_one);
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+
+out_fail:
+       redirty_page_for_writepage(wbc, page);
+       unlock_page(page);
+       return ret;
+}
+
+static int ext4_writeback_writepage(struct page *page,
+                               struct writeback_control *wbc)
+{
+       struct inode *inode = page->mapping->host;
+       handle_t *handle = NULL;
+       int ret = 0;
+       int err;
+
+       if (ext4_journal_current_handle())
+               goto out_fail;
+
+       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out_fail;
+       }
+
+       if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
+               ret = nobh_writepage(page, ext4_get_block, wbc);
+       else
+               ret = block_write_full_page(page, ext4_get_block, wbc);
+
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+
+out_fail:
+       redirty_page_for_writepage(wbc, page);
+       unlock_page(page);
+       return ret;
+}
+
+static int ext4_journalled_writepage(struct page *page,
+                               struct writeback_control *wbc)
+{
+       struct inode *inode = page->mapping->host;
+       handle_t *handle = NULL;
+       int ret = 0;
+       int err;
+
+       if (ext4_journal_current_handle())
+               goto no_write;
+
+       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto no_write;
+       }
+
+       if (!page_has_buffers(page) || PageChecked(page)) {
+               /*
+                * It's mmapped pagecache.  Add buffers and journal it.  There
+                * doesn't seem much point in redirtying the page here.
+                */
+               ClearPageChecked(page);
+               ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
+                                       ext4_get_block);
+               if (ret != 0) {
+                       ext4_journal_stop(handle);
+                       goto out_unlock;
+               }
+               ret = walk_page_buffers(handle, page_buffers(page), 0,
+                       PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
+
+               err = walk_page_buffers(handle, page_buffers(page), 0,
+                               PAGE_CACHE_SIZE, NULL, commit_write_fn);
+               if (ret == 0)
+                       ret = err;
+               EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+               unlock_page(page);
+       } else {
+               /*
+                * It may be a page full of checkpoint-mode buffers.  We don't
+                * really know unless we go poke around in the buffer_heads.
+                * But block_write_full_page will do the right thing.
+                */
+               ret = block_write_full_page(page, ext4_get_block, wbc);
+       }
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+out:
+       return ret;
+
+no_write:
+       redirty_page_for_writepage(wbc, page);
+out_unlock:
+       unlock_page(page);
+       goto out;
+}
+
+static int ext4_readpage(struct file *file, struct page *page)
+{
+       return mpage_readpage(page, ext4_get_block);
+}
+
+static int
+ext4_readpages(struct file *file, struct address_space *mapping,
+               struct list_head *pages, unsigned nr_pages)
+{
+       return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
+}
+
+static void ext4_invalidatepage(struct page *page, unsigned long offset)
+{
+       journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+
+       /*
+        * If it's a full truncate we just forget about the pending dirtying
+        */
+       if (offset == 0)
+               ClearPageChecked(page);
+
+       jbd2_journal_invalidatepage(journal, page, offset);
+}
+
+static int ext4_releasepage(struct page *page, gfp_t wait)
+{
+       journal_t *journal = EXT4_JOURNAL(page->mapping->host);
+
+       WARN_ON(PageChecked(page));
+       if (!page_has_buffers(page))
+               return 0;
+       return jbd2_journal_try_to_free_buffers(journal, page, wait);
+}
+
+/*
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list.  So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ *
+ * If the O_DIRECT write is instantiating holes inside i_size and the machine
+ * crashes then stale disk data _may_ be exposed inside the file.
+ */
+static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+                       const struct iovec *iov, loff_t offset,
+                       unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       handle_t *handle = NULL;
+       ssize_t ret;
+       int orphan = 0;
+       size_t count = iov_length(iov, nr_segs);
+
+       if (rw == WRITE) {
+               loff_t final_size = offset + count;
+
+               handle = ext4_journal_start(inode, DIO_CREDITS);
+               if (IS_ERR(handle)) {
+                       ret = PTR_ERR(handle);
+                       goto out;
+               }
+               if (final_size > inode->i_size) {
+                       ret = ext4_orphan_add(handle, inode);
+                       if (ret)
+                               goto out_stop;
+                       orphan = 1;
+                       ei->i_disksize = inode->i_size;
+               }
+       }
+
+       ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
+                                offset, nr_segs,
+                                ext4_get_block, NULL);
+
+       /*
+        * Reacquire the handle: ext4_get_block() can restart the transaction
+        */
+       handle = journal_current_handle();
+
+out_stop:
+       if (handle) {
+               int err;
+
+               if (orphan && inode->i_nlink)
+                       ext4_orphan_del(handle, inode);
+               if (orphan && ret > 0) {
+                       loff_t end = offset + ret;
+                       if (end > inode->i_size) {
+                               ei->i_disksize = end;
+                               i_size_write(inode, end);
+                               /*
+                                * We're going to return a positive `ret'
+                                * here due to non-zero-length I/O, so there's
+                                * no way of reporting error returns from
+                                * ext4_mark_inode_dirty() to userspace.  So
+                                * ignore it.
+                                */
+                               ext4_mark_inode_dirty(handle, inode);
+                       }
+               }
+               err = ext4_journal_stop(handle);
+               if (ret == 0)
+                       ret = err;
+       }
+out:
+       return ret;
+}
+
+/*
+ * Pages can be marked dirty completely asynchronously from ext4's journalling
+ * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
+ * much here because ->set_page_dirty is called under VFS locks.  The page is
+ * not necessarily locked.
+ *
+ * We cannot just dirty the page and leave attached buffers clean, because the
+ * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
+ * or jbddirty because all the journalling code will explode.
+ *
+ * So what we do is to mark the page "pending dirty" and next time writepage
+ * is called, propagate that into the buffers appropriately.
+ */
+static int ext4_journalled_set_page_dirty(struct page *page)
+{
+       SetPageChecked(page);
+       return __set_page_dirty_nobuffers(page);
+}
+
+static const struct address_space_operations ext4_ordered_aops = {
+       .readpage       = ext4_readpage,
+       .readpages      = ext4_readpages,
+       .writepage      = ext4_ordered_writepage,
+       .sync_page      = block_sync_page,
+       .prepare_write  = ext4_prepare_write,
+       .commit_write   = ext4_ordered_commit_write,
+       .bmap           = ext4_bmap,
+       .invalidatepage = ext4_invalidatepage,
+       .releasepage    = ext4_releasepage,
+       .direct_IO      = ext4_direct_IO,
+       .migratepage    = buffer_migrate_page,
+};
+
+static const struct address_space_operations ext4_writeback_aops = {
+       .readpage       = ext4_readpage,
+       .readpages      = ext4_readpages,
+       .writepage      = ext4_writeback_writepage,
+       .sync_page      = block_sync_page,
+       .prepare_write  = ext4_prepare_write,
+       .commit_write   = ext4_writeback_commit_write,
+       .bmap           = ext4_bmap,
+       .invalidatepage = ext4_invalidatepage,
+       .releasepage    = ext4_releasepage,
+       .direct_IO      = ext4_direct_IO,
+       .migratepage    = buffer_migrate_page,
+};
+
+static const struct address_space_operations ext4_journalled_aops = {
+       .readpage       = ext4_readpage,
+       .readpages      = ext4_readpages,
+       .writepage      = ext4_journalled_writepage,
+       .sync_page      = block_sync_page,
+       .prepare_write  = ext4_prepare_write,
+       .commit_write   = ext4_journalled_commit_write,
+       .set_page_dirty = ext4_journalled_set_page_dirty,
+       .bmap           = ext4_bmap,
+       .invalidatepage = ext4_invalidatepage,
+       .releasepage    = ext4_releasepage,
+};
+
+void ext4_set_aops(struct inode *inode)
+{
+       if (ext4_should_order_data(inode))
+               inode->i_mapping->a_ops = &ext4_ordered_aops;
+       else if (ext4_should_writeback_data(inode))
+               inode->i_mapping->a_ops = &ext4_writeback_aops;
+       else
+               inode->i_mapping->a_ops = &ext4_journalled_aops;
+}
+
+/*
+ * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
+ * up to the end of the block which corresponds to `from'.
+ * This is required during truncate.  We need to physically zero the tail end
+ * of that block so it doesn't yield old data if the file is later grown.
+ */
+int ext4_block_truncate_page(handle_t *handle, struct page *page,
+               struct address_space *mapping, loff_t from)
+{
+       ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
+       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned blocksize, iblock, length, pos;
+       struct inode *inode = mapping->host;
+       struct buffer_head *bh;
+       int err = 0;
+       void *kaddr;
+
+       blocksize = inode->i_sb->s_blocksize;
+       length = blocksize - (offset & (blocksize - 1));
+       iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+
+       /*
+        * For the "nobh" option, we can only work if we don't need to
+        * read-in the page - otherwise we create buffers to do the IO.
+        */
+       if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
+            ext4_should_writeback_data(inode) && PageUptodate(page)) {
+               kaddr = kmap_atomic(page, KM_USER0);
+               memset(kaddr + offset, 0, length);
+               flush_dcache_page(page);
+               kunmap_atomic(kaddr, KM_USER0);
+               set_page_dirty(page);
+               goto unlock;
+       }
+
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, blocksize, 0);
+
+       /* Find the buffer that contains "offset" */
+       bh = page_buffers(page);
+       pos = blocksize;
+       while (offset >= pos) {
+               bh = bh->b_this_page;
+               iblock++;
+               pos += blocksize;
+       }
+
+       err = 0;
+       if (buffer_freed(bh)) {
+               BUFFER_TRACE(bh, "freed: skip");
+               goto unlock;
+       }
+
+       if (!buffer_mapped(bh)) {
+               BUFFER_TRACE(bh, "unmapped");
+               ext4_get_block(inode, iblock, bh, 0);
+               /* unmapped? It's a hole - nothing to do */
+               if (!buffer_mapped(bh)) {
+                       BUFFER_TRACE(bh, "still unmapped");
+                       goto unlock;
+               }
+       }
+
+       /* Ok, it's mapped. Make sure it's up-to-date */
+       if (PageUptodate(page))
+               set_buffer_uptodate(bh);
+
+       if (!buffer_uptodate(bh)) {
+               err = -EIO;
+               ll_rw_block(READ, 1, &bh);
+               wait_on_buffer(bh);
+               /* Uhhuh. Read error. Complain and punt. */
+               if (!buffer_uptodate(bh))
+                       goto unlock;
+       }
+
+       if (ext4_should_journal_data(inode)) {
+               BUFFER_TRACE(bh, "get write access");
+               err = ext4_journal_get_write_access(handle, bh);
+               if (err)
+                       goto unlock;
+       }
+
+       kaddr = kmap_atomic(page, KM_USER0);
+       memset(kaddr + offset, 0, length);
+       flush_dcache_page(page);
+       kunmap_atomic(kaddr, KM_USER0);
+
+       BUFFER_TRACE(bh, "zeroed end of block");
+
+       err = 0;
+       if (ext4_should_journal_data(inode)) {
+               err = ext4_journal_dirty_metadata(handle, bh);
+       } else {
+               if (ext4_should_order_data(inode))
+                       err = ext4_journal_dirty_data(handle, bh);
+               mark_buffer_dirty(bh);
+       }
+
+unlock:
+       unlock_page(page);
+       page_cache_release(page);
+       return err;
+}
+
+/*
+ * Probably it should be a library function... search for first non-zero word
+ * or memcmp with zero_page, whatever is better for particular architecture.
+ * Linus?
+ */
+static inline int all_zeroes(__le32 *p, __le32 *q)
+{
+       while (p < q)
+               if (*p++)
+                       return 0;
+       return 1;
+}
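+
+#if 0
+/*
+ * A sketch of the memcmp-with-zero-page alternative mentioned above,
+ * illustrative only and never compiled: it assumes empty_zero_page is
+ * at least as large as the range being tested (true for a single
+ * indirect block).
+ */
+static inline int all_zeroes_memcmp(__le32 *p, __le32 *q)
+{
+       return !memcmp(p, empty_zero_page, (char *)q - (char *)p);
+}
+#endif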
+
+/**
+ *     ext4_find_shared - find the indirect blocks for partial truncation.
+ *     @inode:   inode in question
+ *     @depth:   depth of the affected branch
+ *     @offsets: offsets of pointers in that branch (see ext4_block_to_path)
+ *     @chain:   place to store the pointers to partial indirect blocks
+ *     @top:     place to the (detached) top of branch
+ *
+ *     This is a helper function used by ext4_truncate().
+ *
+ *     When we do truncate() we may have to clean the ends of several
+ *     indirect blocks but leave the blocks themselves alive. A block is
+ *     partially truncated if some data below the new i_size is referred
+ *     to from it (and it is on the path to the first completely
+ *     truncated data block, indeed).  We have to free the top of that
+ *     path along
+ *     with everything to the right of the path. Since no allocation
+ *     past the truncation point is possible until ext4_truncate()
+ *     finishes, we may safely do the latter, but top of branch may
+ *     require special attention - pageout below the truncation point
+ *     might try to populate it.
+ *
+ *     We atomically detach the top of branch from the tree, store the
+ *     block number of its root in *@top, pointers to buffer_heads of
+ *     partially truncated blocks - in @chain[].bh and pointers to
+ *     their last elements that should not be removed - in
+ *     @chain[].p. Return value is the pointer to last filled element
+ *     of @chain.
+ *
+ *     The actual freeing of subtrees is left to the caller:
+ *             a) free the subtree starting from *@top
+ *             b) free the subtrees whose roots are stored in
+ *                     (@chain[i].p+1 .. end of @chain[i].bh->b_data)
+ *             c) free the subtrees growing from the inode past the @chain[0].
+ *                     (no partially truncated stuff there).  */
+
+static Indirect *ext4_find_shared(struct inode *inode, int depth,
+                       int offsets[4], Indirect chain[4], __le32 *top)
+{
+       Indirect *partial, *p;
+       int k, err;
+
+       *top = 0;
+       /* Make k index the deepest non-null offset + 1 */
+       for (k = depth; k > 1 && !offsets[k-1]; k--)
+               ;
+       partial = ext4_get_branch(inode, k, offsets, chain, &err);
+       /* Writer: pointers */
+       if (!partial)
+               partial = chain + k-1;
+       /*
+        * If the branch has acquired a continuation since we looked at it -
+        * fine, it should all survive and (new) top doesn't belong to us.
+        */
+       if (!partial->key && *partial->p)
+               /* Writer: end */
+               goto no_top;
+       for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
+               ;
+       /*
+        * OK, we've found the last block that must survive. The rest of our
+        * branch should be detached before unlocking. However, if that rest
+        * of branch is all ours and does not grow immediately from the inode
+        * it's easier to cheat and just decrement partial->p.
+        */
+       if (p == chain + k - 1 && p > chain) {
+               p->p--;
+       } else {
+               *top = *p->p;
+               /* Nope, don't do this in ext4.  Must leave the tree intact */
+#if 0
+               *p->p = 0;
+#endif
+       }
+       /* Writer: end */
+
+       while (partial > p) {
+               brelse(partial->bh);
+               partial--;
+       }
+no_top:
+       return partial;
+}
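+
+/*
+ * Illustrative example of the contract above: truncating to a point
+ * inside the double-indirect tree might give depth == 3 and
+ * offsets[] == {EXT4_DIND_BLOCK, 5, 9}.  ext4_find_shared() walks that
+ * chain and returns the deepest element whose block must survive;
+ * ext4_truncate() then frees everything to the right of @chain[i].p at
+ * each surviving level, per steps a)-c) above.
+ */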
+
+/*
+ * Zero a number of block pointers in either an inode or an indirect block.
+ * If we restart the transaction we must again get write access to the
+ * indirect block for further modification.
+ *
+ * We release `count' blocks on disk, but (last - first) may be greater
+ * than `count' because there can be holes in there.
+ */
+static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
+               struct buffer_head *bh, ext4_fsblk_t block_to_free,
+               unsigned long count, __le32 *first, __le32 *last)
+{
+       __le32 *p;
+       if (try_to_extend_transaction(handle, inode)) {
+               if (bh) {
+                       BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
+                       ext4_journal_dirty_metadata(handle, bh);
+               }
+               ext4_mark_inode_dirty(handle, inode);
+               ext4_journal_test_restart(handle, inode);
+               if (bh) {
+                       BUFFER_TRACE(bh, "retaking write access");
+                       ext4_journal_get_write_access(handle, bh);
+               }
+       }
+
+       /*
+        * Any buffers which are on the journal will be in memory. We find
+        * them on the hash table so jbd2_journal_revoke() will run
+        * jbd2_journal_forget() on them.  We've already detached each block
+        * from the file, so bforget() in jbd2_journal_forget() should be safe.
+        *
+        * AKPM: turn on bforget in jbd2_journal_forget()!!!
+        */
+       for (p = first; p < last; p++) {
+               u32 nr = le32_to_cpu(*p);
+               if (nr) {
+                       struct buffer_head *bh;
+
+                       *p = 0;
+                       bh = sb_find_get_block(inode->i_sb, nr);
+                       ext4_forget(handle, 0, inode, bh, nr);
+               }
+       }
+
+       ext4_free_blocks(handle, inode, block_to_free, count);
+}
+
+/**
+ * ext4_free_data - free a list of data blocks
+ * @handle:    handle for this transaction
+ * @inode:     inode we are dealing with
+ * @this_bh:   indirect buffer_head which contains *@first and *@last
+ * @first:     array of block numbers
+ * @last:      points immediately past the end of array
+ *
+ * We are freeing all blocks referred to from that array (numbers are
+ * stored as little-endian 32-bit) and updating @inode->i_blocks
+ * appropriately.
+ *
+ * We accumulate contiguous runs of blocks to free.  Conveniently, if these
+ * blocks are contiguous then releasing them at one time will only affect one
+ * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
+ * actually use a lot of journal space.
+ *
+ * @this_bh will be %NULL if @first and @last point into the inode's direct
+ * block pointers.
+ */
+static void ext4_free_data(handle_t *handle, struct inode *inode,
+                          struct buffer_head *this_bh,
+                          __le32 *first, __le32 *last)
+{
+       ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
+       unsigned long count = 0;            /* Number of blocks in the run */
+       __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
+                                              corresponding to
+                                              block_to_free */
+       ext4_fsblk_t nr;                    /* Current block # */
+       __le32 *p;                          /* Pointer into inode/ind
+                                              for current block */
+       int err;
+
+       if (this_bh) {                          /* For indirect block */
+               BUFFER_TRACE(this_bh, "get_write_access");
+               err = ext4_journal_get_write_access(handle, this_bh);
+               /* Important: if we can't update the indirect pointers
+                * to the blocks, we can't free them. */
+               if (err)
+                       return;
+       }
+
+       for (p = first; p < last; p++) {
+               nr = le32_to_cpu(*p);
+               if (nr) {
+                       /* accumulate blocks to free if they're contiguous */
+                       if (count == 0) {
+                               block_to_free = nr;
+                               block_to_free_p = p;
+                               count = 1;
+                       } else if (nr == block_to_free + count) {
+                               count++;
+                       } else {
+                               ext4_clear_blocks(handle, inode, this_bh,
+                                                 block_to_free,
+                                                 count, block_to_free_p, p);
+                               block_to_free = nr;
+                               block_to_free_p = p;
+                               count = 1;
+                       }
+               }
+       }
+
+       if (count > 0)
+               ext4_clear_blocks(handle, inode, this_bh, block_to_free,
+                                 count, block_to_free_p, p);
+
+       if (this_bh) {
+               BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
+               ext4_journal_dirty_metadata(handle, this_bh);
+       }
+}
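+
+/*
+ * Worked example (illustrative): a pointer array {100, 101, 102, 200,
+ * 0, 201} yields two ext4_clear_blocks() calls - one for the run at
+ * block 100 with count 3 and one for the run at block 200 with count 2.
+ * The hole (the zero entry) is skipped without ending the run, which is
+ * why (last - first) can exceed `count' in ext4_clear_blocks().
+ */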
+
+/**
+ *     ext4_free_branches - free an array of branches
+ *     @handle: JBD handle for this transaction
+ *     @inode: inode we are dealing with
+ *     @parent_bh: the buffer_head which contains *@first and *@last
+ *     @first: array of block numbers
+ *     @last:  pointer immediately past the end of array
+ *     @depth: depth of the branches to free
+ *
+ *     We are freeing all blocks referred to from these branches (numbers
+ *     are stored as little-endian 32-bit) and updating @inode->i_blocks
+ *     appropriately.
+ */
+static void ext4_free_branches(handle_t *handle, struct inode *inode,
+                              struct buffer_head *parent_bh,
+                              __le32 *first, __le32 *last, int depth)
+{
+       ext4_fsblk_t nr;
+       __le32 *p;
+
+       if (is_handle_aborted(handle))
+               return;
+
+       if (depth--) {
+               struct buffer_head *bh;
+               int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+               p = last;
+               while (--p >= first) {
+                       nr = le32_to_cpu(*p);
+                       if (!nr)
+                               continue;               /* A hole */
+
+                       /* Go read the buffer for the next level down */
+                       bh = sb_bread(inode->i_sb, nr);
+
+                       /*
+                        * A read failure? Report error and clear slot
+                        * (should be rare).
+                        */
+                       if (!bh) {
+                               ext4_error(inode->i_sb, "ext4_free_branches",
+                                          "Read failure, inode=%lu, block=%llu",
+                                          inode->i_ino, nr);
+                               continue;
+                       }
+
+                       /* This zaps the entire block.  Bottom up. */
+                       BUFFER_TRACE(bh, "free child branches");
+                       ext4_free_branches(handle, inode, bh,
+                                          (__le32*)bh->b_data,
+                                          (__le32*)bh->b_data + addr_per_block,
+                                          depth);
+
+                       /*
+                        * We've probably journalled the indirect block several
+                        * times during the truncate.  But it's no longer
+                        * needed and we now drop it from the transaction via
+                        * jbd2_journal_revoke().
+                        *
+                        * That's easy if it's exclusively part of this
+                        * transaction.  But if it's part of the committing
+                        * transaction then jbd2_journal_forget() will simply
+                        * brelse() it.  That means that if the underlying
+                        * block is reallocated in ext4_get_block(),
+                        * unmap_underlying_metadata() will find this block
+                        * and will try to get rid of it.  damn, damn.
+                        *
+                        * If this block has already been committed to the
+                        * journal, a revoke record will be written.  And
+                        * revoke records must be emitted *before* clearing
+                        * this block's bit in the bitmaps.
+                        */
+                       ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
+
+                       /*
+                        * Everything below this pointer has been
+                        * released.  Now let this top-of-subtree go.
+                        *
+                        * We want the freeing of this indirect block to be
+                        * atomic in the journal with the updating of the
+                        * bitmap block which owns it.  So make some room in
+                        * the journal.
+                        *
+                        * We zero the parent pointer *after* freeing its
+                        * pointee in the bitmaps, so if extend_transaction()
+                        * for some reason fails to put the bitmap changes and
+                        * the release into the same transaction, recovery
+                        * will merely complain about releasing a free block,
+                        * rather than leaking blocks.
+                        */
+                       if (is_handle_aborted(handle))
+                               return;
+                       if (try_to_extend_transaction(handle, inode)) {
+                               ext4_mark_inode_dirty(handle, inode);
+                               ext4_journal_test_restart(handle, inode);
+                       }
+
+                       ext4_free_blocks(handle, inode, nr, 1);
+
+                       if (parent_bh) {
+                               /*
+                                * The block which we have just freed is
+                                * pointed to by an indirect block: journal it
+                                */
+                               BUFFER_TRACE(parent_bh, "get_write_access");
+                               if (!ext4_journal_get_write_access(handle,
+                                                                  parent_bh)){
+                                       *p = 0;
+                                       BUFFER_TRACE(parent_bh,
+                                       "call ext4_journal_dirty_metadata");
+                                       ext4_journal_dirty_metadata(handle,
+                                                                   parent_bh);
+                               }
+                       }
+               }
+       } else {
+               /* We have reached the bottom of the tree. */
+               BUFFER_TRACE(parent_bh, "free data blocks");
+               ext4_free_data(handle, inode, parent_bh, first, last);
+       }
+}
+
+/*
+ * ext4_truncate()
+ *
+ * We block out ext4_get_block() block instantiations across the entire
+ * transaction, and VFS/VM ensures that ext4_truncate() cannot run
+ * simultaneously on behalf of the same inode.
+ *
+ * As we work through the truncate and commit bits of it to the journal, there
+ * is one core, guiding principle: the file's tree must always be consistent on
+ * disk.  We must be able to restart the truncate after a crash.
+ *
+ * The file's tree may be transiently inconsistent in memory (although it
+ * probably isn't), but whenever we close off and commit a journal transaction,
+ * the contents of (the filesystem + the journal) must be consistent and
+ * restartable.  It's pretty simple, really: bottom up, right to left (although
+ * left-to-right works OK too).
+ *
+ * Note that at recovery time, journal replay occurs *before* the restart of
+ * truncate against the orphan inode list.
+ *
+ * The committed inode has the new, desired i_size (which is the same as
+ * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
+ * that this inode's truncate did not complete and it will again call
+ * ext4_truncate() to have another go.  So there will be instantiated blocks
+ * to the right of the truncation point in a crashed ext4 filesystem.  But
+ * that's fine - as long as they are linked from the inode, the post-crash
+ * ext4_truncate() run will find them and release them.
+ */
+void ext4_truncate(struct inode *inode)
+{
+       handle_t *handle;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       __le32 *i_data = ei->i_data;
+       int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+       struct address_space *mapping = inode->i_mapping;
+       int offsets[4];
+       Indirect chain[4];
+       Indirect *partial;
+       __le32 nr = 0;
+       int n;
+       long last_block;
+       unsigned blocksize = inode->i_sb->s_blocksize;
+       struct page *page;
+
+       if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+           S_ISLNK(inode->i_mode)))
+               return;
+       if (ext4_inode_is_fast_symlink(inode))
+               return;
+       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+               return;
+
+       /*
+        * We have to lock the EOF page here, because lock_page() nests
+        * outside jbd2_journal_start().
+        */
+       if ((inode->i_size & (blocksize - 1)) == 0) {
+               /* Block boundary? Nothing to do */
+               page = NULL;
+       } else {
+               page = grab_cache_page(mapping,
+                               inode->i_size >> PAGE_CACHE_SHIFT);
+               if (!page)
+                       return;
+       }
+
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_truncate(inode, page);
+
+       handle = start_transaction(inode);
+       if (IS_ERR(handle)) {
+               if (page) {
+                       clear_highpage(page);
+                       flush_dcache_page(page);
+                       unlock_page(page);
+                       page_cache_release(page);
+               }
+               return;         /* AKPM: return what? */
+       }
+
+       last_block = (inode->i_size + blocksize-1)
+                                       >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+
+       if (page)
+               ext4_block_truncate_page(handle, page, mapping, inode->i_size);
+
+       n = ext4_block_to_path(inode, last_block, offsets, NULL);
+       if (n == 0)
+               goto out_stop;  /* error */
+
+       /*
+        * OK.  This truncate is going to happen.  We add the inode to the
+        * orphan list, so that if this truncate spans multiple transactions,
+        * and we crash, we will resume the truncate when the filesystem
+        * recovers.  It also marks the inode dirty, to catch the new size.
+        *
+        * Implication: the file must always be in a sane, consistent
+        * truncatable state while each transaction commits.
+        */
+       if (ext4_orphan_add(handle, inode))
+               goto out_stop;
+
+       /*
+        * The orphan list entry will now protect us from any crash which
+        * occurs before the truncate completes, so it is now safe to propagate
+        * the new, shorter inode size (held for now in i_size) into the
+        * on-disk inode. We do this via i_disksize, which is the value which
+        * ext4 *really* writes onto the disk inode.
+        */
+       ei->i_disksize = inode->i_size;
+
+       /*
+        * From here we block out all ext4_get_block() callers who want to
+        * modify the block allocation tree.
+        */
+       mutex_lock(&ei->truncate_mutex);
+
+       if (n == 1) {           /* direct blocks */
+               ext4_free_data(handle, inode, NULL, i_data+offsets[0],
+                              i_data + EXT4_NDIR_BLOCKS);
+               goto do_indirects;
+       }
+
+       partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+       /* Kill the top of shared branch (not detached) */
+       if (nr) {
+               if (partial == chain) {
+                       /* Shared branch grows from the inode */
+                       ext4_free_branches(handle, inode, NULL,
+                                          &nr, &nr+1, (chain+n-1) - partial);
+                       *partial->p = 0;
+                       /*
+                        * We mark the inode dirty prior to restart,
+                        * and prior to stop.  No need for it here.
+                        */
+               } else {
+                       /* Shared branch grows from an indirect block */
+                       BUFFER_TRACE(partial->bh, "get_write_access");
+                       ext4_free_branches(handle, inode, partial->bh,
+                                       partial->p,
+                                       partial->p+1, (chain+n-1) - partial);
+               }
+       }
+       /* Clear the ends of indirect blocks on the shared branch */
+       while (partial > chain) {
+               ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
+                                  (__le32*)partial->bh->b_data+addr_per_block,
+                                  (chain+n-1) - partial);
+               BUFFER_TRACE(partial->bh, "call brelse");
+               brelse (partial->bh);
+               partial--;
+       }
+do_indirects:
+       /* Kill the remaining (whole) subtrees */
+       switch (offsets[0]) {
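+       /*
+        * No breaks below - each case intentionally falls through, so a
+        * truncate starting among the direct blocks also kills the
+        * indirect, double-indirect and triple-indirect subtrees to its
+        * right.
+        */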
+       default:
+               nr = i_data[EXT4_IND_BLOCK];
+               if (nr) {
+                       ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+                       i_data[EXT4_IND_BLOCK] = 0;
+               }
+       case EXT4_IND_BLOCK:
+               nr = i_data[EXT4_DIND_BLOCK];
+               if (nr) {
+                       ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+                       i_data[EXT4_DIND_BLOCK] = 0;
+               }
+       case EXT4_DIND_BLOCK:
+               nr = i_data[EXT4_TIND_BLOCK];
+               if (nr) {
+                       ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+                       i_data[EXT4_TIND_BLOCK] = 0;
+               }
+       case EXT4_TIND_BLOCK:
+               ;
+       }
+
+       ext4_discard_reservation(inode);
+
+       mutex_unlock(&ei->truncate_mutex);
+       inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
+       ext4_mark_inode_dirty(handle, inode);
+
+       /*
+        * In a multi-transaction truncate, we only make the final transaction
+        * synchronous
+        */
+       if (IS_SYNC(inode))
+               handle->h_sync = 1;
+out_stop:
+       /*
+        * If this was a simple ftruncate(), and the file will remain alive
+        * then we need to clear up the orphan record which we created above.
+        * However, if this was a real unlink then we were called by
+        * ext4_delete_inode(), and we allow that function to clean up the
+        * orphan info for us.
+        */
+       if (inode->i_nlink)
+               ext4_orphan_del(handle, inode);
+
+       ext4_journal_stop(handle);
+}
+
+static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
+               unsigned long ino, struct ext4_iloc *iloc)
+{
+       unsigned long desc, group_desc, block_group;
+       unsigned long offset;
+       ext4_fsblk_t block;
+       struct buffer_head *bh;
+       struct ext4_group_desc * gdp;
+
+       if (!ext4_valid_inum(sb, ino)) {
+               /*
+                * This error is already checked for in namei.c unless we are
+                * looking at an NFS filehandle, in which case no error
+                * report is needed
+                */
+               return 0;
+       }
+
+       block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
+       if (block_group >= EXT4_SB(sb)->s_groups_count) {
+               ext4_error(sb,"ext4_get_inode_block","group >= groups count");
+               return 0;
+       }
+       smp_rmb();
+       group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+       desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+       bh = EXT4_SB(sb)->s_group_desc[group_desc];
+       if (!bh) {
+               ext4_error (sb, "ext4_get_inode_block",
+                           "Descriptor not loaded");
+               return 0;
+       }
+
+       gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
+               desc * EXT4_DESC_SIZE(sb));
+       /*
+        * Figure out the offset within the block group inode table
+        */
+       offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
+               EXT4_INODE_SIZE(sb);
+       block = ext4_inode_table(sb, gdp) +
+               (offset >> EXT4_BLOCK_SIZE_BITS(sb));
+
+       iloc->block_group = block_group;
+       iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
+       return block;
+}
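+
+/*
+ * Worked example (illustrative numbers): with 8192 inodes per group,
+ * 128-byte inodes and 1K blocks, ino 12345 gives block_group
+ * 12344 / 8192 = 1 and a byte offset of (12344 % 8192) * 128 = 531456
+ * into that group's inode table, i.e. table block 531456 >> 10 = 519
+ * with iloc->offset = 531456 & 1023 = 0.
+ */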
+
+/*
+ * ext4_get_inode_loc returns with an extra refcount against the inode's
+ * underlying buffer_head on success. If 'in_mem' is true, we have all
+ * data in memory that is needed to recreate the on-disk version of this
+ * inode.
+ */
+static int __ext4_get_inode_loc(struct inode *inode,
+                               struct ext4_iloc *iloc, int in_mem)
+{
+       ext4_fsblk_t block;
+       struct buffer_head *bh;
+
+       block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
+       if (!block)
+               return -EIO;
+
+       bh = sb_getblk(inode->i_sb, block);
+       if (!bh) {
+               ext4_error (inode->i_sb, "ext4_get_inode_loc",
+                               "unable to read inode block - "
+                               "inode=%lu, block=%llu",
+                                inode->i_ino, block);
+               return -EIO;
+       }
+       if (!buffer_uptodate(bh)) {
+               lock_buffer(bh);
+               if (buffer_uptodate(bh)) {
+                       /* someone brought it uptodate while we waited */
+                       unlock_buffer(bh);
+                       goto has_buffer;
+               }
+
+               /*
+                * If we have all information of the inode in memory and this
+                * is the only valid inode in the block, we need not read the
+                * block.
+                */
+               if (in_mem) {
+                       struct buffer_head *bitmap_bh;
+                       struct ext4_group_desc *desc;
+                       int inodes_per_buffer;
+                       int inode_offset, i;
+                       int block_group;
+                       int start;
+
+                       block_group = (inode->i_ino - 1) /
+                                       EXT4_INODES_PER_GROUP(inode->i_sb);
+                       inodes_per_buffer = bh->b_size /
+                               EXT4_INODE_SIZE(inode->i_sb);
+                       inode_offset = ((inode->i_ino - 1) %
+                                       EXT4_INODES_PER_GROUP(inode->i_sb));
+                       start = inode_offset & ~(inodes_per_buffer - 1);
+
+                       /* Is the inode bitmap in cache? */
+                       desc = ext4_get_group_desc(inode->i_sb,
+                                               block_group, NULL);
+                       if (!desc)
+                               goto make_io;
+
+                       bitmap_bh = sb_getblk(inode->i_sb,
+                               ext4_inode_bitmap(inode->i_sb, desc));
+                       if (!bitmap_bh)
+                               goto make_io;
+
+                       /*
+                        * If the inode bitmap isn't in cache then the
+                        * optimisation may end up performing two reads instead
+                        * of one, so skip it.
+                        */
+                       if (!buffer_uptodate(bitmap_bh)) {
+                               brelse(bitmap_bh);
+                               goto make_io;
+                       }
+                       for (i = start; i < start + inodes_per_buffer; i++) {
+                               if (i == inode_offset)
+                                       continue;
+                               if (ext4_test_bit(i, bitmap_bh->b_data))
+                                       break;
+                       }
+                       brelse(bitmap_bh);
+                       if (i == start + inodes_per_buffer) {
+                               /* all other inodes are free, so skip I/O */
+                               memset(bh->b_data, 0, bh->b_size);
+                               set_buffer_uptodate(bh);
+                               unlock_buffer(bh);
+                               goto has_buffer;
+                       }
+               }
+
+make_io:
+               /*
+                * There are other valid inodes in the buffer, this inode
+                * has in-inode xattrs, or we don't have this inode in memory.
+                * Read the block from disk.
+                */
+               get_bh(bh);
+               bh->b_end_io = end_buffer_read_sync;
+               submit_bh(READ_META, bh);
+               wait_on_buffer(bh);
+               if (!buffer_uptodate(bh)) {
+                       ext4_error(inode->i_sb, "ext4_get_inode_loc",
+                                       "unable to read inode block - "
+                                       "inode=%lu, block=%llu",
+                                       inode->i_ino, block);
+                       brelse(bh);
+                       return -EIO;
+               }
+       }
+has_buffer:
+       iloc->bh = bh;
+       return 0;
+}
+
+int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
+{
+       /* We have all inode data except xattrs in memory here. */
+       return __ext4_get_inode_loc(inode, iloc,
+               !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
+}
+
+void ext4_set_inode_flags(struct inode *inode)
+{
+       unsigned int flags = EXT4_I(inode)->i_flags;
+
+       inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+       if (flags & EXT4_SYNC_FL)
+               inode->i_flags |= S_SYNC;
+       if (flags & EXT4_APPEND_FL)
+               inode->i_flags |= S_APPEND;
+       if (flags & EXT4_IMMUTABLE_FL)
+               inode->i_flags |= S_IMMUTABLE;
+       if (flags & EXT4_NOATIME_FL)
+               inode->i_flags |= S_NOATIME;
+       if (flags & EXT4_DIRSYNC_FL)
+               inode->i_flags |= S_DIRSYNC;
+}
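+
+/*
+ * Illustrative example: a file marked append-only with chattr +a
+ * carries EXT4_APPEND_FL on disk; the helper above surfaces it to the
+ * VFS as S_APPEND so generic code can enforce the restriction.
+ */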
+
+void ext4_read_inode(struct inode * inode)
+{
+       struct ext4_iloc iloc;
+       struct ext4_inode *raw_inode;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct buffer_head *bh;
+       int block;
+
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+       ei->i_acl = EXT4_ACL_NOT_CACHED;
+       ei->i_default_acl = EXT4_ACL_NOT_CACHED;
+#endif
+       ei->i_block_alloc_info = NULL;
+
+       if (__ext4_get_inode_loc(inode, &iloc, 0))
+               goto bad_inode;
+       bh = iloc.bh;
+       raw_inode = ext4_raw_inode(&iloc);
+       inode->i_mode = le16_to_cpu(raw_inode->i_mode);
+       inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+       inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+       if (!(test_opt(inode->i_sb, NO_UID32))) {
+               inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+               inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+       }
+       inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
+       inode->i_size = le32_to_cpu(raw_inode->i_size);
+       inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
+       inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
+       inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
+       inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
+
+       ei->i_state = 0;
+       ei->i_dir_start_lookup = 0;
+       ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
+       /* We now have enough fields to check if the inode was active or not.
+        * This is needed because nfsd might try to access dead inodes;
+        * the test is the same one that e2fsck uses.
+        * NeilBrown 1999oct15
+        */
+       if (inode->i_nlink == 0) {
+               if (inode->i_mode == 0 ||
+                   !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
+                       /* this inode is deleted */
+                       brelse (bh);
+                       goto bad_inode;
+               }
+               /* The only unlinked inodes we let through here have
+                * valid i_mode and are being read by the orphan
+                * recovery code: that's fine, we're about to complete
+                * the process of deleting those. */
+       }
+       inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
+       ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+#ifdef EXT4_FRAGMENTS
+       ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
+       ei->i_frag_no = raw_inode->i_frag;
+       ei->i_frag_size = raw_inode->i_fsize;
+#endif
+       ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
+       if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+           cpu_to_le32(EXT4_OS_HURD))
+               ei->i_file_acl |=
+                       ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
+       if (!S_ISREG(inode->i_mode)) {
+               ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
+       } else {
+               inode->i_size |=
+                       ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
+       }
+       ei->i_disksize = inode->i_size;
+       inode->i_generation = le32_to_cpu(raw_inode->i_generation);
+       ei->i_block_group = iloc.block_group;
+       /*
+        * NOTE! The in-memory inode i_data array is in little-endian order
+        * even on big-endian machines: we do NOT byteswap the block numbers!
+        */
+       for (block = 0; block < EXT4_N_BLOCKS; block++)
+               ei->i_data[block] = raw_inode->i_block[block];
+       INIT_LIST_HEAD(&ei->i_orphan);
+
+       if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 &&
+           EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+               /*
+                * When mke2fs creates big inodes it does not zero out
+                * the unused bytes above EXT4_GOOD_OLD_INODE_SIZE,
+                * so ignore those first few inodes.
+                */
+               ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+               if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+                   EXT4_INODE_SIZE(inode->i_sb))
+                       goto bad_inode;
+               if (ei->i_extra_isize == 0) {
+                       /* The extra space is currently unused. Use it. */
+                       ei->i_extra_isize = sizeof(struct ext4_inode) -
+                                           EXT4_GOOD_OLD_INODE_SIZE;
+               } else {
+                       __le32 *magic = (void *)raw_inode +
+                                       EXT4_GOOD_OLD_INODE_SIZE +
+                                       ei->i_extra_isize;
+                       if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
+                                ei->i_state |= EXT4_STATE_XATTR;
+               }
+       } else
+               ei->i_extra_isize = 0;
+
+       if (S_ISREG(inode->i_mode)) {
+               inode->i_op = &ext4_file_inode_operations;
+               inode->i_fop = &ext4_file_operations;
+               ext4_set_aops(inode);
+       } else if (S_ISDIR(inode->i_mode)) {
+               inode->i_op = &ext4_dir_inode_operations;
+               inode->i_fop = &ext4_dir_operations;
+       } else if (S_ISLNK(inode->i_mode)) {
+               if (ext4_inode_is_fast_symlink(inode))
+                       inode->i_op = &ext4_fast_symlink_inode_operations;
+               else {
+                       inode->i_op = &ext4_symlink_inode_operations;
+                       ext4_set_aops(inode);
+               }
+       } else {
+               inode->i_op = &ext4_special_inode_operations;
+               if (raw_inode->i_block[0])
+                       init_special_inode(inode, inode->i_mode,
+                          old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
+               else
+                       init_special_inode(inode, inode->i_mode,
+                          new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
+       }
+       brelse (iloc.bh);
+       ext4_set_inode_flags(inode);
+       return;
+
+bad_inode:
+       make_bad_inode(inode);
+       return;
+}
+
+/*
+ * Post the struct inode info into an on-disk inode location in the
+ * buffer-cache.  This gobbles the caller's reference to the
+ * buffer_head in the inode location struct.
+ *
+ * The caller must have write access to iloc->bh.
+ */
+static int ext4_do_update_inode(handle_t *handle,
+                               struct inode *inode,
+                               struct ext4_iloc *iloc)
+{
+       struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct buffer_head *bh = iloc->bh;
+       int err = 0, rc, block;
+
+       /* For fields not tracked in the in-memory inode,
+        * initialise them to zero for new inodes. */
+       if (ei->i_state & EXT4_STATE_NEW)
+               memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
+
+       raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+       if (!(test_opt(inode->i_sb, NO_UID32))) {
+               raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
+               raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
+/*
+ * Fix up interoperability with old kernels. Otherwise, old inodes get
+ * re-used with the upper 16 bits of the uid/gid intact
+ */
+               if (!ei->i_dtime) {
+                       raw_inode->i_uid_high =
+                               cpu_to_le16(high_16_bits(inode->i_uid));
+                       raw_inode->i_gid_high =
+                               cpu_to_le16(high_16_bits(inode->i_gid));
+               } else {
+                       raw_inode->i_uid_high = 0;
+                       raw_inode->i_gid_high = 0;
+               }
+       } else {
+               raw_inode->i_uid_low =
+                       cpu_to_le16(fs_high2lowuid(inode->i_uid));
+               raw_inode->i_gid_low =
+                       cpu_to_le16(fs_high2lowgid(inode->i_gid));
+               raw_inode->i_uid_high = 0;
+               raw_inode->i_gid_high = 0;
+       }
+       raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+       raw_inode->i_size = cpu_to_le32(ei->i_disksize);
+       raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
+       raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+       raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+       raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
+       raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
+       raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+#ifdef EXT4_FRAGMENTS
+       raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
+       raw_inode->i_frag = ei->i_frag_no;
+       raw_inode->i_fsize = ei->i_frag_size;
+#endif
+       if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
+           cpu_to_le32(EXT4_OS_HURD))
+               raw_inode->i_file_acl_high =
+                       cpu_to_le16(ei->i_file_acl >> 32);
+       raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
+       if (!S_ISREG(inode->i_mode)) {
+               raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
+       } else {
+               raw_inode->i_size_high =
+                       cpu_to_le32(ei->i_disksize >> 32);
+               if (ei->i_disksize > 0x7fffffffULL) {
+                       struct super_block *sb = inode->i_sb;
+                       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
+                           EXT4_SB(sb)->s_es->s_rev_level ==
+                                       cpu_to_le32(EXT4_GOOD_OLD_REV)) {
+                              /* If this is the first large file
+                               * created, add a flag to the superblock.
+                               */
+                               err = ext4_journal_get_write_access(handle,
+                                               EXT4_SB(sb)->s_sbh);
+                               if (err)
+                                       goto out_brelse;
+                               ext4_update_dynamic_rev(sb);
+                               EXT4_SET_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
+                               sb->s_dirt = 1;
+                               handle->h_sync = 1;
+                               err = ext4_journal_dirty_metadata(handle,
+                                               EXT4_SB(sb)->s_sbh);
+                       }
+               }
+       }
+       raw_inode->i_generation = cpu_to_le32(inode->i_generation);
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+               if (old_valid_dev(inode->i_rdev)) {
+                       raw_inode->i_block[0] =
+                               cpu_to_le32(old_encode_dev(inode->i_rdev));
+                       raw_inode->i_block[1] = 0;
+               } else {
+                       raw_inode->i_block[0] = 0;
+                       raw_inode->i_block[1] =
+                               cpu_to_le32(new_encode_dev(inode->i_rdev));
+                       raw_inode->i_block[2] = 0;
+               }
+       } else for (block = 0; block < EXT4_N_BLOCKS; block++)
+               raw_inode->i_block[block] = ei->i_data[block];
+
+       if (ei->i_extra_isize)
+               raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
+
+       BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
+       rc = ext4_journal_dirty_metadata(handle, bh);
+       if (!err)
+               err = rc;
+       ei->i_state &= ~EXT4_STATE_NEW;
+
+out_brelse:
+       brelse (bh);
+       ext4_std_error(inode->i_sb, err);
+       return err;
+}
+
+/*
+ * ext4_write_inode()
+ *
+ * We are called from a few places:
+ *
+ * - Within generic_file_write() for O_SYNC files.
+ *   Here, there will be no transaction running. We wait for any running
+ *   transaction to commit.
+ *
+ * - Within sys_sync(), kupdate and such.
+ *   We wait on the commit, if told to.
+ *
+ * - Within prune_icache() (PF_MEMALLOC == true)
+ *   Here we simply return.  We can't afford to block kswapd on the
+ *   journal commit.
+ *
+ * In all cases it is actually safe for us to return without doing anything,
+ * because the inode has been copied into a raw inode buffer in
+ * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
+ * knfsd.
+ *
+ * Note that we are absolutely dependent upon all inode dirtiers doing the
+ * right thing: they *must* call mark_inode_dirty() after dirtying info in
+ * which we are interested.
+ *
+ * It would be a bug for them to not do this.  The code:
+ *
+ *     mark_inode_dirty(inode)
+ *     stuff();
+ *     inode->i_size = expr;
+ *
+ * is in error because a kswapd-driven write_inode() could occur while
+ * `stuff()' is running, and the new i_size will be lost.  Plus the inode
+ * will no longer be on the superblock's dirty inode list.
+ */
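+/*
+ * For contrast, the correct ordering is (illustrative):
+ *
+ *     stuff();
+ *     inode->i_size = expr;
+ *     mark_inode_dirty(inode);
+ */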
+int ext4_write_inode(struct inode *inode, int wait)
+{
+       if (current->flags & PF_MEMALLOC)
+               return 0;
+
+       if (ext4_journal_current_handle()) {
+               jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
+               dump_stack();
+               return -EIO;
+       }
+
+       if (!wait)
+               return 0;
+
+       return ext4_force_commit(inode->i_sb);
+}
+
+/*
+ * ext4_setattr()
+ *
+ * Called from notify_change.
+ *
+ * We want to trap VFS attempts to truncate the file as soon as
+ * possible.  In particular, we want to make sure that when the VFS
+ * shrinks i_size, we put the inode on the orphan list and modify
+ * i_disksize immediately, so that during the subsequent flushing of
+ * dirty pages and freeing of disk blocks, we can guarantee that any
+ * commit will leave the blocks being flushed in an unused state on
+ * disk.  (On recovery, the inode will get truncated and the blocks will
+ * be freed, so we have a strong guarantee that no future commit will
+ * leave these blocks visible to the user.)
+ *
+ * Called with inode->sem down.
+ */
+int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+{
+       struct inode *inode = dentry->d_inode;
+       int error, rc = 0;
+       const unsigned int ia_valid = attr->ia_valid;
+
+       error = inode_change_ok(inode, attr);
+       if (error)
+               return error;
+
+       if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+               (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+               handle_t *handle;
+
+               /* (user+group)*(old+new) structure, inode write (sb,
+                * inode block, ? - but truncate inode update has it) */
+               handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
+                                       EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
+               if (IS_ERR(handle)) {
+                       error = PTR_ERR(handle);
+                       goto err_out;
+               }
+               error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+               if (error) {
+                       ext4_journal_stop(handle);
+                       return error;
+               }
+               /* Update corresponding info in inode so that everything is in
+                * one transaction */
+               if (attr->ia_valid & ATTR_UID)
+                       inode->i_uid = attr->ia_uid;
+               if (attr->ia_valid & ATTR_GID)
+                       inode->i_gid = attr->ia_gid;
+               error = ext4_mark_inode_dirty(handle, inode);
+               ext4_journal_stop(handle);
+       }
+
+       if (S_ISREG(inode->i_mode) &&
+           attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
+               handle_t *handle;
+
+               handle = ext4_journal_start(inode, 3);
+               if (IS_ERR(handle)) {
+                       error = PTR_ERR(handle);
+                       goto err_out;
+               }
+
+               error = ext4_orphan_add(handle, inode);
+               EXT4_I(inode)->i_disksize = attr->ia_size;
+               rc = ext4_mark_inode_dirty(handle, inode);
+               if (!error)
+                       error = rc;
+               ext4_journal_stop(handle);
+       }
+
+       rc = inode_setattr(inode, attr);
+
+       /* If inode_setattr's call to ext4_truncate failed to get a
+        * transaction handle at all, we need to clean up the in-core
+        * orphan list manually. */
+       if (inode->i_nlink)
+               ext4_orphan_del(NULL, inode);
+
+       if (!rc && (ia_valid & ATTR_MODE))
+               rc = ext4_acl_chmod(inode);
+
+err_out:
+       ext4_std_error(inode->i_sb, error);
+       if (!error)
+               error = rc;
+       return error;
+}
+
+/*
+ * How many blocks doth make a writepage()?
+ *
+ * With N blocks per page, it may be:
+ * N data blocks
+ * 2 indirect blocks
+ * 2 dindirect blocks
+ * 1 tindirect block
+ * N+5 bitmap blocks (from the above)
+ * N+5 group descriptor summary blocks
+ * 1 inode block
+ * 1 superblock.
+ * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
+ *
+ * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
+ *
+ * With ordered or writeback data it's the same, less the N data blocks.
+ *
+ * If the inode's direct blocks can hold an integral number of pages then a
+ * page cannot straddle two indirect blocks, and we can only touch one indirect
+ * and dindirect block, and the "5" above becomes "3".
+ *
+ * This still overestimates under most circumstances.  If we were to pass the
+ * start and end offsets in here as well we could do block_to_path() on each
+ * block and work out the exact number of indirects which are touched.  Pah.
+ */
+
+int ext4_writepage_trans_blocks(struct inode *inode)
+{
+       int bpp = ext4_journal_blocks_per_page(inode);
+       int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
+       int ret;
+
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_writepage_trans_blocks(inode, bpp);
+
+       if (ext4_should_journal_data(inode))
+               ret = 3 * (bpp + indirects) + 2;
+       else
+               ret = 2 * (bpp + indirects) + 2;
+
+#ifdef CONFIG_QUOTA
+       /* We know that the structure was already allocated during DQUOT_INIT,
+        * so we will be updating only the data blocks + inodes */
+       ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
+#endif
+
+       return ret;
+}
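+
+/*
+ * Worked example for the estimate above (illustrative): with 1K blocks
+ * on a 4K page, bpp = 4 and EXT4_NDIR_BLOCKS (12) % 4 == 0, so
+ * indirects = 3.  Data journalling then reserves 3 * (4 + 3) + 2 = 23
+ * blocks and ordered or writeback mode 2 * (4 + 3) + 2 = 16, plus the
+ * quota blocks when CONFIG_QUOTA is enabled.
+ */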
+
+/*
+ * The caller must have previously called ext4_reserve_inode_write().
+ * Given this, we know that the caller already has write access to iloc->bh.
+ */
+int ext4_mark_iloc_dirty(handle_t *handle,
+               struct inode *inode, struct ext4_iloc *iloc)
+{
+       int err = 0;
+
+       /* the do_update_inode consumes one bh->b_count */
+       get_bh(iloc->bh);
+
+       /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
+       err = ext4_do_update_inode(handle, inode, iloc);
+       put_bh(iloc->bh);
+       return err;
+}
+
+/*
+ * On success, we end up with an outstanding reference count against
+ * iloc->bh.  This _must_ be cleaned up later.
+ */
+
+int
+ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
+                        struct ext4_iloc *iloc)
+{
+       int err = 0;
+       if (handle) {
+               err = ext4_get_inode_loc(inode, iloc);
+               if (!err) {
+                       BUFFER_TRACE(iloc->bh, "get_write_access");
+                       err = ext4_journal_get_write_access(handle, iloc->bh);
+                       if (err) {
+                               brelse(iloc->bh);
+                               iloc->bh = NULL;
+                       }
+               }
+       }
+       ext4_std_error(inode->i_sb, err);
+       return err;
+}
+
+/*
+ * What we do here is to mark the in-core inode as clean with respect to inode
+ * dirtiness (it may still be data-dirty).
+ * This means that the in-core inode may be reaped by prune_icache
+ * without having to perform any I/O.  This is a very good thing,
+ * because *any* task may call prune_icache - even ones which
+ * have a transaction open against a different journal.
+ *
+ * Is this cheating?  Not really.  Sure, we haven't written the
+ * inode out, but prune_icache isn't a user-visible syncing function.
+ * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
+ * we start and wait on commits.
+ *
+ * Is this efficient/effective?  Well, we're being nice to the system
+ * by cleaning up our inodes proactively so they can be reaped
+ * without I/O.  But we are potentially leaving up to five seconds'
+ * worth of inodes floating about which prune_icache wants us to
+ * write out.  One way to fix that would be to get prune_icache()
+ * to do a write_super() to free up some memory.  It has the desired
+ * effect.
+ */
+int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+{
+       struct ext4_iloc iloc;
+       int err;
+
+       might_sleep();
+       err = ext4_reserve_inode_write(handle, inode, &iloc);
+       if (!err)
+               err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+       return err;
+}
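+
+/*
+ * Typical caller pattern (an illustrative sketch with error handling
+ * elided - ext4_dirty_inode() below is a real instance):
+ *
+ *     handle = ext4_journal_start(inode, credits);
+ *     ...update the in-core inode...
+ *     ext4_mark_inode_dirty(handle, inode);
+ *     ext4_journal_stop(handle);
+ */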
+
+/*
+ * ext4_dirty_inode() is called from __mark_inode_dirty()
+ *
+ * We're really interested in the case where a file is being extended.
+ * i_size has been changed by generic_commit_write() and we thus need
+ * to include the updated inode in the current transaction.
+ *
+ * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * are allocated to the file.
+ *
+ * If the inode is marked synchronous, we don't honour that here - doing
+ * so would cause a commit on atime updates, which we don't bother doing.
+ * We handle synchronous inodes at the highest possible level.
+ */
+void ext4_dirty_inode(struct inode *inode)
+{
+       handle_t *current_handle = ext4_journal_current_handle();
+       handle_t *handle;
+
+       handle = ext4_journal_start(inode, 2);
+       if (IS_ERR(handle))
+               goto out;
+       if (current_handle &&
+               current_handle->h_transaction != handle->h_transaction) {
+               /* This task has a transaction open against a different fs */
+               printk(KERN_EMERG "%s: transactions do not match!\n",
+                      __FUNCTION__);
+       } else {
+               jbd_debug(5, "marking dirty.  outer handle=%p\n",
+                               current_handle);
+               ext4_mark_inode_dirty(handle, inode);
+       }
+       ext4_journal_stop(handle);
+out:
+       return;
+}
+
+#if 0
+/*
+ * Bind an inode's backing buffer_head into this transaction, to prevent
+ * it from being flushed to disk early.  Unlike
+ * ext4_reserve_inode_write, this leaves behind no bh reference and
+ * returns no iloc structure, so the caller needs to repeat the iloc
+ * lookup to mark the inode dirty later.
+ */
+static int ext4_pin_inode(handle_t *handle, struct inode *inode)
+{
+       struct ext4_iloc iloc;
+
+       int err = 0;
+       if (handle) {
+               err = ext4_get_inode_loc(inode, &iloc);
+               if (!err) {
+                       BUFFER_TRACE(iloc.bh, "get_write_access");
+                       err = jbd2_journal_get_write_access(handle, iloc.bh);
+                       if (!err)
+                               err = ext4_journal_dirty_metadata(handle,
+                                                                 iloc.bh);
+                       brelse(iloc.bh);
+               }
+       }
+       ext4_std_error(inode->i_sb, err);
+       return err;
+}
+#endif
+
+int ext4_change_inode_journal_flag(struct inode *inode, int val)
+{
+       journal_t *journal;
+       handle_t *handle;
+       int err;
+
+       /*
+        * We have to be very careful here: changing a data block's
+        * journaling status dynamically is dangerous.  If we write a
+        * data block to the journal, change the status and then delete
+        * that block, we risk forgetting to revoke the old log record
+        * from the journal and so a subsequent replay can corrupt data.
+        * So, first we make sure that the journal is empty and that
+        * nobody is changing anything.
+        */
+
+       journal = EXT4_JOURNAL(inode);
+       if (is_journal_aborted(journal) || IS_RDONLY(inode))
+               return -EROFS;
+
+       jbd2_journal_lock_updates(journal);
+       jbd2_journal_flush(journal);
+
+       /*
+        * OK, there are no updates running now, and all cached data is
+        * synced to disk.  We are now in a completely consistent state
+        * which doesn't have anything in the journal, and we know that
+        * no filesystem updates are running, so it is safe to modify
+        * the inode's in-core data-journaling state flag now.
+        */
+
+       if (val)
+               EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
+       else
+               EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
+       ext4_set_aops(inode);
+
+       jbd2_journal_unlock_updates(journal);
+
+       /* Finally we can mark the inode as dirty. */
+
+       handle = ext4_journal_start(inode, 1);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       err = ext4_mark_inode_dirty(handle, inode);
+       handle->h_sync = 1;
+       ext4_journal_stop(handle);
+       ext4_std_error(inode->i_sb, err);
+
+       return err;
+}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
new file mode 100644 (file)
index 0000000..22a737c
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * linux/fs/ext4/ioctl.c
+ *
+ * Copyright (C) 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/capability.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/time.h>
+#include <linux/compat.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+
+int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
+               unsigned long arg)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       unsigned int flags;
+       unsigned short rsv_window_size;
+
+       ext4_debug ("cmd = %u, arg = %lu\n", cmd, arg);
+
+       switch (cmd) {
+       case EXT4_IOC_GETFLAGS:
+               flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
+               return put_user(flags, (int __user *) arg);
+       case EXT4_IOC_SETFLAGS: {
+               handle_t *handle = NULL;
+               int err;
+               struct ext4_iloc iloc;
+               unsigned int oldflags;
+               unsigned int jflag;
+
+               if (IS_RDONLY(inode))
+                       return -EROFS;
+
+               if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+                       return -EACCES;
+
+               if (get_user(flags, (int __user *) arg))
+                       return -EFAULT;
+
+               if (!S_ISDIR(inode->i_mode))
+                       flags &= ~EXT4_DIRSYNC_FL;
+
+               mutex_lock(&inode->i_mutex);
+               oldflags = ei->i_flags;
+
+               /* The JOURNAL_DATA flag is modifiable only by root */
+               jflag = flags & EXT4_JOURNAL_DATA_FL;
+
+               /*
+                * The IMMUTABLE and APPEND_ONLY flags can only be changed by
+                * the relevant capability.
+                *
+                * This test looks nicer. Thanks to Pauline Middelink
+                */
+               if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
+                       if (!capable(CAP_LINUX_IMMUTABLE)) {
+                               mutex_unlock(&inode->i_mutex);
+                               return -EPERM;
+                       }
+               }
+
+               /*
+                * The JOURNAL_DATA flag can only be changed by
+                * the relevant capability.
+                */
+               if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
+                       if (!capable(CAP_SYS_RESOURCE)) {
+                               mutex_unlock(&inode->i_mutex);
+                               return -EPERM;
+                       }
+               }
+
+               handle = ext4_journal_start(inode, 1);
+               if (IS_ERR(handle)) {
+                       mutex_unlock(&inode->i_mutex);
+                       return PTR_ERR(handle);
+               }
+               if (IS_SYNC(inode))
+                       handle->h_sync = 1;
+               err = ext4_reserve_inode_write(handle, inode, &iloc);
+               if (err)
+                       goto flags_err;
+
+               flags = flags & EXT4_FL_USER_MODIFIABLE;
+               flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
+               ei->i_flags = flags;
+
+               ext4_set_inode_flags(inode);
+               inode->i_ctime = CURRENT_TIME_SEC;
+
+               err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+flags_err:
+               ext4_journal_stop(handle);
+               if (err) {
+                       mutex_unlock(&inode->i_mutex);
+                       return err;
+               }
+
+               if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL))
+                       err = ext4_change_inode_journal_flag(inode, jflag);
+               mutex_unlock(&inode->i_mutex);
+               return err;
+       }
+       case EXT4_IOC_GETVERSION:
+       case EXT4_IOC_GETVERSION_OLD:
+               return put_user(inode->i_generation, (int __user *) arg);
+       case EXT4_IOC_SETVERSION:
+       case EXT4_IOC_SETVERSION_OLD: {
+               handle_t *handle;
+               struct ext4_iloc iloc;
+               __u32 generation;
+               int err;
+
+               if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+                       return -EPERM;
+               if (IS_RDONLY(inode))
+                       return -EROFS;
+               if (get_user(generation, (int __user *) arg))
+                       return -EFAULT;
+
+               handle = ext4_journal_start(inode, 1);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               err = ext4_reserve_inode_write(handle, inode, &iloc);
+               if (err == 0) {
+                       inode->i_ctime = CURRENT_TIME_SEC;
+                       inode->i_generation = generation;
+                       err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+               }
+               ext4_journal_stop(handle);
+               return err;
+       }
+#ifdef CONFIG_JBD_DEBUG
+       case EXT4_IOC_WAIT_FOR_READONLY:
+               /*
+                * This is racy - by the time we're woken up and running,
+                * the superblock could be released.  And the module could
+                * have been unloaded.  So sue me.
+                *
+                * Returns 1 if it slept, else zero.
+                */
+               {
+                       struct super_block *sb = inode->i_sb;
+                       DECLARE_WAITQUEUE(wait, current);
+                       int ret = 0;
+
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       add_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
+                       if (timer_pending(&EXT4_SB(sb)->turn_ro_timer)) {
+                               schedule();
+                               ret = 1;
+                       }
+                       remove_wait_queue(&EXT4_SB(sb)->ro_wait_queue, &wait);
+                       return ret;
+               }
+#endif
+       case EXT4_IOC_GETRSVSZ:
+               if (test_opt(inode->i_sb, RESERVATION)
+                       && S_ISREG(inode->i_mode)
+                       && ei->i_block_alloc_info) {
+                       rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
+                       return put_user(rsv_window_size, (int __user *)arg);
+               }
+               return -ENOTTY;
+       case EXT4_IOC_SETRSVSZ: {
+
+               if (!test_opt(inode->i_sb, RESERVATION) || !S_ISREG(inode->i_mode))
+                       return -ENOTTY;
+
+               if (IS_RDONLY(inode))
+                       return -EROFS;
+
+               if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+                       return -EACCES;
+
+               if (get_user(rsv_window_size, (int __user *)arg))
+                       return -EFAULT;
+
+               if (rsv_window_size > EXT4_MAX_RESERVE_BLOCKS)
+                       rsv_window_size = EXT4_MAX_RESERVE_BLOCKS;
+
+               /*
+                * We need to allocate the reservation structure for
+                * this inode before we can set the window size.
+                */
+               mutex_lock(&ei->truncate_mutex);
+               if (!ei->i_block_alloc_info)
+                       ext4_init_block_alloc_info(inode);
+
+               if (ei->i_block_alloc_info) {
+                       struct ext4_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
+                       rsv->rsv_goal_size = rsv_window_size;
+               }
+               mutex_unlock(&ei->truncate_mutex);
+               return 0;
+       }
+       case EXT4_IOC_GROUP_EXTEND: {
+               ext4_fsblk_t n_blocks_count;
+               struct super_block *sb = inode->i_sb;
+               int err;
+
+               if (!capable(CAP_SYS_RESOURCE))
+                       return -EPERM;
+
+               if (IS_RDONLY(inode))
+                       return -EROFS;
+
+               if (get_user(n_blocks_count, (__u32 __user *)arg))
+                       return -EFAULT;
+
+               err = ext4_group_extend(sb, EXT4_SB(sb)->s_es, n_blocks_count);
+               jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+               jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+               jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+
+               return err;
+       }
+       case EXT4_IOC_GROUP_ADD: {
+               struct ext4_new_group_data input;
+               struct super_block *sb = inode->i_sb;
+               int err;
+
+               if (!capable(CAP_SYS_RESOURCE))
+                       return -EPERM;
+
+               if (IS_RDONLY(inode))
+                       return -EROFS;
+
+               if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
+                               sizeof(input)))
+                       return -EFAULT;
+
+               err = ext4_group_add(sb, &input);
+               jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
+               jbd2_journal_flush(EXT4_SB(sb)->s_journal);
+               jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+
+               return err;
+       }
+
+       default:
+               return -ENOTTY;
+       }
+}
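+
+/*
+ * A minimal sketch of driving the flag ioctls from user space
+ * (illustrative only; fd setup and error checking are assumed):
+ *
+ *     int flags;
+ *     ioctl(fd, EXT4_IOC_GETFLAGS, &flags);
+ *     flags |= EXT4_APPEND_FL;       (needs CAP_LINUX_IMMUTABLE)
+ *     ioctl(fd, EXT4_IOC_SETFLAGS, &flags);
+ */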
+
+#ifdef CONFIG_COMPAT
+long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct inode *inode = file->f_dentry->d_inode;
+       int ret;
+
+       /* These are just misnamed; they actually get/put an int from/to user space */
+       switch (cmd) {
+       case EXT4_IOC32_GETFLAGS:
+               cmd = EXT4_IOC_GETFLAGS;
+               break;
+       case EXT4_IOC32_SETFLAGS:
+               cmd = EXT4_IOC_SETFLAGS;
+               break;
+       case EXT4_IOC32_GETVERSION:
+               cmd = EXT4_IOC_GETVERSION;
+               break;
+       case EXT4_IOC32_SETVERSION:
+               cmd = EXT4_IOC_SETVERSION;
+               break;
+       case EXT4_IOC32_GROUP_EXTEND:
+               cmd = EXT4_IOC_GROUP_EXTEND;
+               break;
+       case EXT4_IOC32_GETVERSION_OLD:
+               cmd = EXT4_IOC_GETVERSION_OLD;
+               break;
+       case EXT4_IOC32_SETVERSION_OLD:
+               cmd = EXT4_IOC_SETVERSION_OLD;
+               break;
+#ifdef CONFIG_JBD_DEBUG
+       case EXT4_IOC32_WAIT_FOR_READONLY:
+               cmd = EXT4_IOC_WAIT_FOR_READONLY;
+               break;
+#endif
+       case EXT4_IOC32_GETRSVSZ:
+               cmd = EXT4_IOC_GETRSVSZ;
+               break;
+       case EXT4_IOC32_SETRSVSZ:
+               cmd = EXT4_IOC_SETRSVSZ;
+               break;
+       case EXT4_IOC_GROUP_ADD:
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+       lock_kernel();
+       ret = ext4_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
+       unlock_kernel();
+       return ret;
+}
+#endif
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
new file mode 100644 (file)
index 0000000..8b1bd03
--- /dev/null
@@ -0,0 +1,2395 @@
+/*
+ *  linux/fs/ext4/namei.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/namei.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ *  Directory entry file type support and forward compatibility hooks
+ *     for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
+ *  Hash Tree Directory indexing (c)
+ *     Daniel Phillips, 2001
+ *  Hash Tree Directory indexing porting
+ *     Christopher Li, 2002
+ *  Hash Tree Directory indexing cleanup
+ *     Theodore Ts'o, 2002
+ */
+
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/jbd2.h>
+#include <linux/time.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/bio.h>
+#include <linux/smp_lock.h>
+
+#include "namei.h"
+#include "xattr.h"
+#include "acl.h"
+
+/*
+ * define how far ahead to read directories while searching them.
+ */
+#define NAMEI_RA_CHUNKS  2
+#define NAMEI_RA_BLOCKS  4
+#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
+#define NAMEI_RA_INDEX(c,b)  (((c) * NAMEI_RA_BLOCKS) + (b))
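+
+/*
+ * Worked example: with the values above, NAMEI_RA_SIZE is 2 * 4 = 8,
+ * so ext4_find_entry() keeps up to eight directory blocks in flight,
+ * and NAMEI_RA_INDEX(1, 2) maps to slot 1 * 4 + 2 = 6 of bh_use[].
+ */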
+
+static struct buffer_head *ext4_append(handle_t *handle,
+                                       struct inode *inode,
+                                       u32 *block, int *err)
+{
+       struct buffer_head *bh;
+
+       *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
+
+       if ((bh = ext4_bread(handle, inode, *block, 1, err))) {
+               inode->i_size += inode->i_sb->s_blocksize;
+               EXT4_I(inode)->i_disksize = inode->i_size;
+               ext4_journal_get_write_access(handle,bh);
+       }
+       return bh;
+}
+
+#ifndef assert
+#define assert(test) J_ASSERT(test)
+#endif
+
+#ifndef swap
+#define swap(x, y) do { typeof(x) z = x; x = y; y = z; } while (0)
+#endif
+
+#ifdef DX_DEBUG
+#define dxtrace(command) command
+#else
+#define dxtrace(command)
+#endif
+
+struct fake_dirent
+{
+       __le32 inode;
+       __le16 rec_len;
+       u8 name_len;
+       u8 file_type;
+};
+
+struct dx_countlimit
+{
+       __le16 limit;
+       __le16 count;
+};
+
+struct dx_entry
+{
+       __le32 hash;
+       __le32 block;
+};
+
+/*
+ * dx_root_info is laid out so that if it should somehow get overlaid by a
+ * dirent the two low bits of the hash version will be zero.  Therefore, the
+ * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
+ */
+
+struct dx_root
+{
+       struct fake_dirent dot;
+       char dot_name[4];
+       struct fake_dirent dotdot;
+       char dotdot_name[4];
+       struct dx_root_info
+       {
+               __le32 reserved_zero;
+               u8 hash_version;
+               u8 info_length; /* 8 */
+               u8 indirect_levels;
+               u8 unused_flags;
+       }
+       info;
+       struct dx_entry entries[0];
+};
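+
+/*
+ * Byte layout of the dx_root block, as implied by the structures
+ * above: the "." dirent occupies bytes 0-11, ".." bytes 12-23,
+ * dx_root_info bytes 24-31, and the dx_entry array starts at byte 32
+ * and runs to the end of the block.
+ */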
+
+struct dx_node
+{
+       struct fake_dirent fake;
+       struct dx_entry entries[0];
+};
+
+
+struct dx_frame
+{
+       struct buffer_head *bh;
+       struct dx_entry *entries;
+       struct dx_entry *at;
+};
+
+struct dx_map_entry
+{
+       u32 hash;
+       u32 offs;
+};
+
+#ifdef CONFIG_EXT4_INDEX
+static inline unsigned dx_get_block (struct dx_entry *entry);
+static void dx_set_block (struct dx_entry *entry, unsigned value);
+static inline unsigned dx_get_hash (struct dx_entry *entry);
+static void dx_set_hash (struct dx_entry *entry, unsigned value);
+static unsigned dx_get_count (struct dx_entry *entries);
+static unsigned dx_get_limit (struct dx_entry *entries);
+static void dx_set_count (struct dx_entry *entries, unsigned value);
+static void dx_set_limit (struct dx_entry *entries, unsigned value);
+static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
+static unsigned dx_node_limit (struct inode *dir);
+static struct dx_frame *dx_probe(struct dentry *dentry,
+                                struct inode *dir,
+                                struct dx_hash_info *hinfo,
+                                struct dx_frame *frame,
+                                int *err);
+static void dx_release (struct dx_frame *frames);
+static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
+                       struct dx_hash_info *hinfo, struct dx_map_entry map[]);
+static void dx_sort_map(struct dx_map_entry *map, unsigned count);
+static struct ext4_dir_entry_2 *dx_move_dirents (char *from, char *to,
+               struct dx_map_entry *offsets, int count);
+static struct ext4_dir_entry_2* dx_pack_dirents (char *base, int size);
+static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
+static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+                                __u32 *start_hash);
+static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
+                      struct ext4_dir_entry_2 **res_dir, int *err);
+static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
+                            struct inode *inode);
+
+/*
+ * Future: use high four bits of block for coalesce-on-delete flags
+ * Mask them off for now.
+ */
+
+static inline unsigned dx_get_block (struct dx_entry *entry)
+{
+       return le32_to_cpu(entry->block) & 0x00ffffff;
+}
+
+static inline void dx_set_block (struct dx_entry *entry, unsigned value)
+{
+       entry->block = cpu_to_le32(value);
+}
+
+static inline unsigned dx_get_hash (struct dx_entry *entry)
+{
+       return le32_to_cpu(entry->hash);
+}
+
+static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
+{
+       entry->hash = cpu_to_le32(value);
+}
+
+static inline unsigned dx_get_count (struct dx_entry *entries)
+{
+       return le16_to_cpu(((struct dx_countlimit *) entries)->count);
+}
+
+static inline unsigned dx_get_limit (struct dx_entry *entries)
+{
+       return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
+}
+
+static inline void dx_set_count (struct dx_entry *entries, unsigned value)
+{
+       ((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
+}
+
+static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
+{
+       ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
+}
+
+static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
+{
+       unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
+               EXT4_DIR_REC_LEN(2) - infosize;
+       return 0? 20: entry_space / sizeof(struct dx_entry);
+}
+
+static inline unsigned dx_node_limit (struct inode *dir)
+{
+       unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+       return 0? 22: entry_space / sizeof(struct dx_entry);
+}
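+
+/*
+ * Worked example, assuming a 4096-byte block and the usual
+ * EXT4_DIR_REC_LEN rounding (8-byte header plus name, padded to a
+ * 4-byte multiple): dx_root_limit() is (4096 - 12 - 12 - 8) / 8 = 508
+ * entries and dx_node_limit() is (4096 - 8) / 8 = 511 entries.  The
+ * "0? 20:" and "0? 22:" forms above are compiled-out debug clamps for
+ * forcing tiny fanouts during testing.
+ */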
+
+/*
+ * Debug
+ */
+#ifdef DX_DEBUG
+static void dx_show_index (char * label, struct dx_entry *entries)
+{
+       int i, n = dx_get_count (entries);
+       printk("%s index ", label);
+       for (i = 0; i < n; i++) {
+               printk("%x->%u ", i? dx_get_hash(entries + i) :
+                               0, dx_get_block(entries + i));
+       }
+       printk("\n");
+}
+
+struct stats
+{
+       unsigned names;
+       unsigned space;
+       unsigned bcount;
+};
+
+static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext4_dir_entry_2 *de,
+                                int size, int show_names)
+{
+       unsigned names = 0, space = 0;
+       char *base = (char *) de;
+       struct dx_hash_info h = *hinfo;
+
+       printk("names: ");
+       while ((char *) de < base + size)
+       {
+               if (de->inode)
+               {
+                       if (show_names)
+                       {
+                               int len = de->name_len;
+                               char *name = de->name;
+                               while (len--) printk("%c", *name++);
+                               ext4fs_dirhash(de->name, de->name_len, &h);
+                               printk(":%x.%u ", h.hash,
+                                      ((char *) de - base));
+                       }
+                       space += EXT4_DIR_REC_LEN(de->name_len);
+                       names++;
+               }
+               de = (struct ext4_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
+       }
+       printk("(%i)\n", names);
+       return (struct stats) { names, space, 1 };
+}
+
+struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
+                            struct dx_entry *entries, int levels)
+{
+       unsigned blocksize = dir->i_sb->s_blocksize;
+       unsigned count = dx_get_count (entries), names = 0, space = 0, i;
+       unsigned bcount = 0;
+       struct buffer_head *bh;
+       int err;
+       printk("%i indexed blocks...\n", count);
+       for (i = 0; i < count; i++, entries++)
+       {
+               u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
+               u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
+               struct stats stats;
+               printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
+               if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue;
+               stats = levels?
+                  dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
+                  dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
+               names += stats.names;
+               space += stats.space;
+               bcount += stats.bcount;
+               brelse (bh);
+       }
+       if (bcount)
+               printk("%snames %u, fullness %u (%u%%)\n", levels?"":"   ",
+                       names, space/bcount,(space/bcount)*100/blocksize);
+       return (struct stats) { names, space, bcount};
+}
+#endif /* DX_DEBUG */
+
+/*
+ * Probe for a directory leaf block to search.
+ *
+ * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
+ * error in the directory index, and the caller should fall back to
+ * searching the directory normally.  The callers of dx_probe **MUST**
+ * check for this error code, and make sure it never gets reflected
+ * back to userspace.
+ */
+static struct dx_frame *
+dx_probe(struct dentry *dentry, struct inode *dir,
+        struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
+{
+       unsigned count, indirect;
+       struct dx_entry *at, *entries, *p, *q, *m;
+       struct dx_root *root;
+       struct buffer_head *bh;
+       struct dx_frame *frame = frame_in;
+       u32 hash;
+
+       frame->bh = NULL;
+       if (dentry)
+               dir = dentry->d_parent->d_inode;
+       if (!(bh = ext4_bread (NULL,dir, 0, 0, err)))
+               goto fail;
+       root = (struct dx_root *) bh->b_data;
+       if (root->info.hash_version != DX_HASH_TEA &&
+           root->info.hash_version != DX_HASH_HALF_MD4 &&
+           root->info.hash_version != DX_HASH_LEGACY) {
+               ext4_warning(dir->i_sb, __FUNCTION__,
+                            "Unrecognised inode hash code %d",
+                            root->info.hash_version);
+               brelse(bh);
+               *err = ERR_BAD_DX_DIR;
+               goto fail;
+       }
+       hinfo->hash_version = root->info.hash_version;
+       hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+       if (dentry)
+               ext4fs_dirhash(dentry->d_name.name, dentry->d_name.len, hinfo);
+       hash = hinfo->hash;
+
+       if (root->info.unused_flags & 1) {
+               ext4_warning(dir->i_sb, __FUNCTION__,
+                            "Unimplemented inode hash flags: %#06x",
+                            root->info.unused_flags);
+               brelse(bh);
+               *err = ERR_BAD_DX_DIR;
+               goto fail;
+       }
+
+       if ((indirect = root->info.indirect_levels) > 1) {
+               ext4_warning(dir->i_sb, __FUNCTION__,
+                            "Unimplemented inode hash depth: %#06x",
+                            root->info.indirect_levels);
+               brelse(bh);
+               *err = ERR_BAD_DX_DIR;
+               goto fail;
+       }
+
+       entries = (struct dx_entry *) (((char *)&root->info) +
+                                      root->info.info_length);
+       assert(dx_get_limit(entries) == dx_root_limit(dir,
+                                                     root->info.info_length));
+       dxtrace (printk("Look up %x", hash));
+       while (1)
+       {
+               count = dx_get_count(entries);
+               assert (count && count <= dx_get_limit(entries));
+               p = entries + 1;
+               q = entries + count - 1;
+               while (p <= q)
+               {
+                       m = p + (q - p)/2;
+                       dxtrace(printk("."));
+                       if (dx_get_hash(m) > hash)
+                               q = m - 1;
+                       else
+                               p = m + 1;
+               }
+
+               if (0) // linear search cross check
+               {
+                       unsigned n = count - 1;
+                       at = entries;
+                       while (n--)
+                       {
+                               dxtrace(printk(","));
+                               if (dx_get_hash(++at) > hash)
+                               {
+                                       at--;
+                                       break;
+                               }
+                       }
+                       assert (at == p - 1);
+               }
+
+               at = p - 1;
+               dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
+               frame->bh = bh;
+               frame->entries = entries;
+               frame->at = at;
+               if (!indirect--) return frame;
+               if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
+                       goto fail2;
+               at = entries = ((struct dx_node *) bh->b_data)->entries;
+               assert (dx_get_limit(entries) == dx_node_limit (dir));
+               frame++;
+       }
+fail2:
+       while (frame >= frame_in) {
+               brelse(frame->bh);
+               frame--;
+       }
+fail:
+       return NULL;
+}
+
+static void dx_release (struct dx_frame *frames)
+{
+       if (frames[0].bh == NULL)
+               return;
+
+       if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
+               brelse(frames[1].bh);
+       brelse(frames[0].bh);
+}
+
+/*
+ * This function increments the frame pointer to search the next leaf
+ * block, and reads in any intervening index nodes if continuing the
+ * search requires it.  Whether the search continues is controlled by
+ * the hash parameter.  If the hash value is even, then the search is
+ * only continued if the next block starts with that
+ * hash value.  This is used if we are searching for a specific file.
+ *
+ * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
+ *
+ * This function returns 1 if the caller should continue to search,
+ * or 0 if it should not.  If there is an error reading one of the
+ * index blocks, it will return a negative error code.
+ *
+ * If start_hash is non-null, it will be filled in with the starting
+ * hash of the next page.
+ */
+static int ext4_htree_next_block(struct inode *dir, __u32 hash,
+                                struct dx_frame *frame,
+                                struct dx_frame *frames,
+                                __u32 *start_hash)
+{
+       struct dx_frame *p;
+       struct buffer_head *bh;
+       int err, num_frames = 0;
+       __u32 bhash;
+
+       p = frame;
+       /*
+        * Find the next leaf page by incrementing the frame pointer.
+        * If we run out of entries in the interior node, loop around and
+        * increment the pointer in the parent node.  When we break out
+        * of this loop, num_frames indicates the number of interior
+        * nodes that need to be read.
+        */
+       while (1) {
+               if (++(p->at) < p->entries + dx_get_count(p->entries))
+                       break;
+               if (p == frames)
+                       return 0;
+               num_frames++;
+               p--;
+       }
+
+       /*
+        * If the hash is 1, then continue only if the next page has a
+        * continuation hash of any value.  This is used for readdir
+        * handling.  Otherwise, check to see if the hash matches the
+        * desired continuation hash.  If it doesn't, return, since
+        * there's no point in reading the successive index pages.
+        */
+       bhash = dx_get_hash(p->at);
+       if (start_hash)
+               *start_hash = bhash;
+       if ((hash & 1) == 0) {
+               if ((bhash & ~1) != hash)
+                       return 0;
+       }
+       /*
+        * If the hash is HASH_NB_ALWAYS, we always go to the next
+        * block so no check is necessary
+        */
+       while (num_frames--) {
+               if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
+                                     0, &err)))
+                       return err; /* Failure */
+               p++;
+               brelse (p->bh);
+               p->bh = bh;
+               p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
+       }
+       return 1;
+}
+
+
+/*
+ * p is at least 6 bytes before the end of page
+ */
+static inline struct ext4_dir_entry_2 *ext4_next_entry(struct ext4_dir_entry_2 *p)
+{
+       return (struct ext4_dir_entry_2 *)((char*)p + le16_to_cpu(p->rec_len));
+}
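+
+/*
+ * Example, assuming valid rec_len values: a dirent named "foo" takes
+ * at least EXT4_DIR_REC_LEN(3) = 12 bytes, and the last live entry in
+ * a block stretches its rec_len to the block end, so repeated calls
+ * to ext4_next_entry() land on each entry in turn and then step just
+ * past the block.
+ */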
+
+/*
+ * This function fills a red-black tree with information from a
+ * directory block.  It returns the number of directory entries loaded
+ * into the tree.  If there is an error it is returned in err.
+ */
+static int htree_dirblock_to_tree(struct file *dir_file,
+                                 struct inode *dir, int block,
+                                 struct dx_hash_info *hinfo,
+                                 __u32 start_hash, __u32 start_minor_hash)
+{
+       struct buffer_head *bh;
+       struct ext4_dir_entry_2 *de, *top;
+       int err, count = 0;
+
+       dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
+       if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
+               return err;
+
+       de = (struct ext4_dir_entry_2 *) bh->b_data;
+       top = (struct ext4_dir_entry_2 *) ((char *) de +
+                                          dir->i_sb->s_blocksize -
+                                          EXT4_DIR_REC_LEN(0));
+       for (; de < top; de = ext4_next_entry(de)) {
+               ext4fs_dirhash(de->name, de->name_len, hinfo);
+               if ((hinfo->hash < start_hash) ||
+                   ((hinfo->hash == start_hash) &&
+                    (hinfo->minor_hash < start_minor_hash)))
+                       continue;
+               if (de->inode == 0)
+                       continue;
+               if ((err = ext4_htree_store_dirent(dir_file,
+                                  hinfo->hash, hinfo->minor_hash, de)) != 0) {
+                       brelse(bh);
+                       return err;
+               }
+               count++;
+       }
+       brelse(bh);
+       return count;
+}
+
+
+/*
+ * This function fills a red-black tree with information from a
+ * directory.  We start scanning the directory in hash order, starting
+ * at start_hash and start_minor_hash.
+ *
+ * This function returns the number of entries inserted into the tree,
+ * or a negative error code.
+ */
+int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+                        __u32 start_minor_hash, __u32 *next_hash)
+{
+       struct dx_hash_info hinfo;
+       struct ext4_dir_entry_2 *de;
+       struct dx_frame frames[2], *frame;
+       struct inode *dir;
+       int block, err;
+       int count = 0;
+       int ret;
+       __u32 hashval;
+
+       dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
+                      start_minor_hash));
+       dir = dir_file->f_dentry->d_inode;
+       if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
+               hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+               hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+               count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
+                                              start_hash, start_minor_hash);
+               *next_hash = ~0;
+               return count;
+       }
+       hinfo.hash = start_hash;
+       hinfo.minor_hash = 0;
+       frame = dx_probe(NULL, dir_file->f_dentry->d_inode, &hinfo, frames, &err);
+       if (!frame)
+               return err;
+
+       /* Add '.' and '..' from the htree header */
+       if (!start_hash && !start_minor_hash) {
+               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
+               if ((err = ext4_htree_store_dirent(dir_file, 0, 0, de)) != 0)
+                       goto errout;
+               count++;
+       }
+       if (start_hash < 2 || (start_hash == 2 && start_minor_hash == 0)) {
+               de = (struct ext4_dir_entry_2 *) frames[0].bh->b_data;
+               de = ext4_next_entry(de);
+               if ((err = ext4_htree_store_dirent(dir_file, 2, 0, de)) != 0)
+                       goto errout;
+               count++;
+       }
+
+       while (1) {
+               block = dx_get_block(frame->at);
+               ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
+                                            start_hash, start_minor_hash);
+               if (ret < 0) {
+                       err = ret;
+                       goto errout;
+               }
+               count += ret;
+               hashval = ~0;
+               ret = ext4_htree_next_block(dir, HASH_NB_ALWAYS,
+                                           frame, frames, &hashval);
+               *next_hash = hashval;
+               if (ret < 0) {
+                       err = ret;
+                       goto errout;
+               }
+               /*
+                * Stop if:  (a) there are no more entries, or
+                * (b) we have inserted at least one entry and the
+                * next hash value is not a continuation
+                */
+               if ((ret == 0) ||
+                   (count && ((hashval & 1) == 0)))
+                       break;
+       }
+       dx_release(frames);
+       dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
+                      count, *next_hash));
+       return count;
+errout:
+       dx_release(frames);
+       return (err);
+}
+
+
+/*
+ * Directory block splitting, compacting
+ */
+
+static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
+                       struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
+{
+       int count = 0;
+       char *base = (char *) de;
+       struct dx_hash_info h = *hinfo;
+
+       while ((char *) de < base + size)
+       {
+               if (de->name_len && de->inode) {
+                       ext4fs_dirhash(de->name, de->name_len, &h);
+                       map_tail--;
+                       map_tail->hash = h.hash;
+                       map_tail->offs = (u32) ((char *) de - base);
+                       count++;
+                       cond_resched();
+               }
+               /* XXX: do we need to check rec_len == 0 case? -Chris */
+               de = (struct ext4_dir_entry_2 *) ((char *) de + le16_to_cpu(de->rec_len));
+       }
+       return count;
+}
+
+static void dx_sort_map (struct dx_map_entry *map, unsigned count)
+{
+       struct dx_map_entry *p, *q, *top = map + count - 1;
+       int more;
+       /* Combsort until bubble sort doesn't suck */
+       while (count > 2) {
+               count = count*10/13;
+               if (count - 9 < 2) /* 9, 10 -> 11 */
+                       count = 11;
+               for (p = top, q = p - count; q >= map; p--, q--)
+                       if (p->hash < q->hash)
+                               swap(*p, *q);
+       }
+       /* Garden variety bubble sort */
+       do {
+               more = 0;
+               q = top;
+               while (q-- > map) {
+                       if (q[1].hash >= q[0].hash)
+                               continue;
+                       swap(*(q+1), *q);
+                       more = 1;
+               }
+       } while(more);
+}
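+
+/*
+ * The shrink factor above is 13/10 (count = count*10/13 per pass),
+ * close to the classic ~1.3 recommended for combsort; the "9, 10 ->
+ * 11" fix-up skips gaps of 9 and 10, which are known to sort poorly,
+ * and the final bubble pass cheaply finishes the nearly-sorted array.
+ */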
+
+static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
+{
+       struct dx_entry *entries = frame->entries;
+       struct dx_entry *old = frame->at, *new = old + 1;
+       int count = dx_get_count(entries);
+
+       assert(count < dx_get_limit(entries));
+       assert(old < entries + count);
+       memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
+       dx_set_hash(new, hash);
+       dx_set_block(new, block);
+       dx_set_count(entries, count + 1);
+}
+#endif
+
+
+static void ext4_update_dx_flag(struct inode *inode)
+{
+       if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+                                    EXT4_FEATURE_COMPAT_DIR_INDEX))
+               EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL;
+}
+
+/*
+ * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
+ *
+ * `len <= EXT4_NAME_LEN' is guaranteed by caller.
+ * `de != NULL' is guaranteed by caller.
+ */
+static inline int ext4_match (int len, const char * const name,
+                             struct ext4_dir_entry_2 * de)
+{
+       if (len != de->name_len)
+               return 0;
+       if (!de->inode)
+               return 0;
+       return !memcmp(name, de->name, len);
+}
+
+/*
+ * Returns 0 if not found, -1 on failure, and 1 on success
+ */
+static inline int search_dirblock(struct buffer_head * bh,
+                                 struct inode *dir,
+                                 struct dentry *dentry,
+                                 unsigned long offset,
+                                 struct ext4_dir_entry_2 ** res_dir)
+{
+       struct ext4_dir_entry_2 * de;
+       char * dlimit;
+       int de_len;
+       const char *name = dentry->d_name.name;
+       int namelen = dentry->d_name.len;
+
+       de = (struct ext4_dir_entry_2 *) bh->b_data;
+       dlimit = bh->b_data + dir->i_sb->s_blocksize;
+       while ((char *) de < dlimit) {
+               /* this code is executed quadratically often */
+               /* do minimal checking `by hand' */
+
+               if ((char *) de + namelen <= dlimit &&
+                   ext4_match (namelen, name, de)) {
+                       /* found a match - just to be sure, do a full check */
+                       if (!ext4_check_dir_entry("ext4_find_entry",
+                                                 dir, de, bh, offset))
+                               return -1;
+                       *res_dir = de;
+                       return 1;
+               }
+               /* prevent looping on a bad block */
+               de_len = le16_to_cpu(de->rec_len);
+               if (de_len <= 0)
+                       return -1;
+               offset += de_len;
+               de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
+       }
+       return 0;
+}
+
+
+/*
+ *     ext4_find_entry()
+ *
+ * finds an entry in the specified directory with the wanted name. It
+ * returns the cache buffer in which the entry was found, and the entry
+ * itself (as a parameter - res_dir). It does NOT read the inode of the
+ * entry - you'll have to do that yourself if you want to.
+ *
+ * The returned buffer_head has ->b_count elevated.  The caller is expected
+ * to brelse() it when appropriate.
+ */
+static struct buffer_head * ext4_find_entry (struct dentry *dentry,
+                                       struct ext4_dir_entry_2 ** res_dir)
+{
+       struct super_block * sb;
+       struct buffer_head * bh_use[NAMEI_RA_SIZE];
+       struct buffer_head * bh, *ret = NULL;
+       unsigned long start, block, b;
+       int ra_max = 0;         /* Number of bh's in the readahead
+                                  buffer, bh_use[] */
+       int ra_ptr = 0;         /* Current index into readahead
+                                  buffer */
+       int num = 0;
+       int nblocks, i, err;
+       struct inode *dir = dentry->d_parent->d_inode;
+       int namelen;
+       const u8 *name;
+       unsigned blocksize;
+
+       *res_dir = NULL;
+       sb = dir->i_sb;
+       blocksize = sb->s_blocksize;
+       namelen = dentry->d_name.len;
+       name = dentry->d_name.name;
+       if (namelen > EXT4_NAME_LEN)
+               return NULL;
+#ifdef CONFIG_EXT4_INDEX
+       if (is_dx(dir)) {
+               bh = ext4_dx_find_entry(dentry, res_dir, &err);
+               /*
+                * On success, or if the error was file not found,
+                * return.  Otherwise, fall back to searching the
+                * old-fashioned way.
+                */
+               if (bh || (err != ERR_BAD_DX_DIR))
+                       return bh;
+               dxtrace(printk("ext4_find_entry: dx failed, falling back\n"));
+       }
+#endif
+       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+       start = EXT4_I(dir)->i_dir_start_lookup;
+       if (start >= nblocks)
+               start = 0;
+       block = start;
+restart:
+       do {
+               /*
+                * We deal with the read-ahead logic here.
+                */
+               if (ra_ptr >= ra_max) {
+                       /* Refill the readahead buffer */
+                       ra_ptr = 0;
+                       b = block;
+                       for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
+                               /*
+                                * Terminate if we reach the end of the
+                                * directory and must wrap, or if our
+                                * search has finished at this block.
+                                */
+                               if (b >= nblocks || (num && block == start)) {
+                                       bh_use[ra_max] = NULL;
+                                       break;
+                               }
+                               num++;
+                               bh = ext4_getblk(NULL, dir, b++, 0, &err);
+                               bh_use[ra_max] = bh;
+                               if (bh)
+                                       ll_rw_block(READ_META, 1, &bh);
+                       }
+               }
+               if ((bh = bh_use[ra_ptr++]) == NULL)
+                       goto next;
+               wait_on_buffer(bh);
+               if (!buffer_uptodate(bh)) {
+                       /* read error, skip block & hope for the best */
+                       ext4_error(sb, __FUNCTION__, "reading directory #%lu "
+                                  "offset %lu", dir->i_ino, block);
+                       brelse(bh);
+                       goto next;
+               }
+               i = search_dirblock(bh, dir, dentry,
+                           block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
+               if (i == 1) {
+                       EXT4_I(dir)->i_dir_start_lookup = block;
+                       ret = bh;
+                       goto cleanup_and_exit;
+               } else {
+                       brelse(bh);
+                       if (i < 0)
+                               goto cleanup_and_exit;
+               }
+       next:
+               if (++block >= nblocks)
+                       block = 0;
+       } while (block != start);
+
+       /*
+        * If the directory has grown while we were searching, then
+        * search the last part of the directory before giving up.
+        */
+       block = nblocks;
+       nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
+       if (block < nblocks) {
+               start = 0;
+               goto restart;
+       }
+
+cleanup_and_exit:
+       /* Clean up the read-ahead blocks */
+       for (; ra_ptr < ra_max; ra_ptr++)
+               brelse (bh_use[ra_ptr]);
+       return ret;
+}
+
+#ifdef CONFIG_EXT4_INDEX
+static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
+                      struct ext4_dir_entry_2 **res_dir, int *err)
+{
+       struct super_block * sb;
+       struct dx_hash_info     hinfo;
+       u32 hash;
+       struct dx_frame frames[2], *frame;
+       struct ext4_dir_entry_2 *de, *top;
+       struct buffer_head *bh;
+       unsigned long block;
+       int retval;
+       int namelen = dentry->d_name.len;
+       const u8 *name = dentry->d_name.name;
+       struct inode *dir = dentry->d_parent->d_inode;
+
+       sb = dir->i_sb;
+       /* NFS may look up ".." - look at dx_root directory block */
+       if (namelen > 2 || name[0] != '.' || (name[1] != '.' && name[1] != '\0')) {
+               if (!(frame = dx_probe(dentry, NULL, &hinfo, frames, err)))
+                       return NULL;
+       } else {
+               frame = frames;
+               frame->bh = NULL;                       /* for dx_release() */
+               frame->at = (struct dx_entry *)frames;  /* hack for zero entry*/
+               dx_set_block(frame->at, 0);             /* dx_root block is 0 */
+       }
+       hash = hinfo.hash;
+       do {
+               block = dx_get_block(frame->at);
+               if (!(bh = ext4_bread (NULL,dir, block, 0, err)))
+                       goto errout;
+               de = (struct ext4_dir_entry_2 *) bh->b_data;
+               top = (struct ext4_dir_entry_2 *) ((char *) de + sb->s_blocksize -
+                                      EXT4_DIR_REC_LEN(0));
+               for (; de < top; de = ext4_next_entry(de))
+               if (ext4_match (namelen, name, de)) {
+                       if (!ext4_check_dir_entry("ext4_find_entry",
+                                                 dir, de, bh,
+                                 (block<<EXT4_BLOCK_SIZE_BITS(sb))
+                                         +((char *)de - bh->b_data))) {
+                               brelse (bh);
+                               goto errout;
+                       }
+                       *res_dir = de;
+                       dx_release (frames);
+                       return bh;
+               }
+               brelse (bh);
+               /* Check to see if we should continue to search */
+               retval = ext4_htree_next_block(dir, hash, frame,
+                                              frames, NULL);
+               if (retval < 0) {
+                       ext4_warning(sb, __FUNCTION__,
+                            "error reading index page in directory #%lu",
+                            dir->i_ino);
+                       *err = retval;
+                       goto errout;
+               }
+       } while (retval == 1);
+
+       *err = -ENOENT;
+errout:
+       dxtrace(printk("%s not found\n", name));
+       dx_release (frames);
+       return NULL;
+}
+#endif
+
+static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+{
+       struct inode * inode;
+       struct ext4_dir_entry_2 * de;
+       struct buffer_head * bh;
+
+       if (dentry->d_name.len > EXT4_NAME_LEN)
+               return ERR_PTR(-ENAMETOOLONG);
+
+       bh = ext4_find_entry(dentry, &de);
+       inode = NULL;
+       if (bh) {
+               unsigned long ino = le32_to_cpu(de->inode);
+               brelse (bh);
+               if (!ext4_valid_inum(dir->i_sb, ino)) {
+                       ext4_error(dir->i_sb, "ext4_lookup",
+                                  "bad inode number: %lu", ino);
+                       inode = NULL;
+               } else
+                       inode = iget(dir->i_sb, ino);
+
+               if (!inode)
+                       return ERR_PTR(-EACCES);
+       }
+       return d_splice_alias(inode, dentry);
+}
+
+
+struct dentry *ext4_get_parent(struct dentry *child)
+{
+       unsigned long ino;
+       struct dentry *parent;
+       struct inode *inode;
+       struct dentry dotdot;
+       struct ext4_dir_entry_2 * de;
+       struct buffer_head *bh;
+
+       dotdot.d_name.name = "..";
+       dotdot.d_name.len = 2;
+       dotdot.d_parent = child; /* confusing, isn't it! */
+
+       bh = ext4_find_entry(&dotdot, &de);
+       inode = NULL;
+       if (!bh)
+               return ERR_PTR(-ENOENT);
+       ino = le32_to_cpu(de->inode);
+       brelse(bh);
+
+       if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
+               ext4_error(child->d_inode->i_sb, "ext4_get_parent",
+                          "bad inode number: %lu", ino);
+               inode = NULL;
+       } else
+               inode = iget(child->d_inode->i_sb, ino);
+
+       if (!inode)
+               return ERR_PTR(-EACCES);
+
+       parent = d_alloc_anon(inode);
+       if (!parent) {
+               iput(inode);
+               parent = ERR_PTR(-ENOMEM);
+       }
+       return parent;
+}
+
+#define S_SHIFT 12
+static unsigned char ext4_type_by_mode[S_IFMT >> S_SHIFT] = {
+       [S_IFREG >> S_SHIFT]    = EXT4_FT_REG_FILE,
+       [S_IFDIR >> S_SHIFT]    = EXT4_FT_DIR,
+       [S_IFCHR >> S_SHIFT]    = EXT4_FT_CHRDEV,
+       [S_IFBLK >> S_SHIFT]    = EXT4_FT_BLKDEV,
+       [S_IFIFO >> S_SHIFT]    = EXT4_FT_FIFO,
+       [S_IFSOCK >> S_SHIFT]   = EXT4_FT_SOCK,
+       [S_IFLNK >> S_SHIFT]    = EXT4_FT_SYMLINK,
+};
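+
+/*
+ * Example: S_IFREG is 0100000, so (S_IFREG & S_IFMT) >> S_SHIFT is 8,
+ * and slot 8 above yields EXT4_FT_REG_FILE.  Mode types without a
+ * slot fall through to the zero-filled entries, i.e. EXT4_FT_UNKNOWN.
+ */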
+
+static inline void ext4_set_de_type(struct super_block *sb,
+                               struct ext4_dir_entry_2 *de,
+                               umode_t mode) {
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE))
+               de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
+}
+
+#ifdef CONFIG_EXT4_INDEX
+static struct ext4_dir_entry_2 *
+dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
+{
+       unsigned rec_len = 0;
+
+       while (count--) {
+               struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *) (from + map->offs);
+               rec_len = EXT4_DIR_REC_LEN(de->name_len);
+               memcpy (to, de, rec_len);
+               ((struct ext4_dir_entry_2 *) to)->rec_len =
+                               cpu_to_le16(rec_len);
+               de->inode = 0;
+               map++;
+               to += rec_len;
+       }
+       return (struct ext4_dir_entry_2 *) (to - rec_len);
+}
+
+static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
+{
+       struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
+       unsigned rec_len = 0;
+
+       prev = to = de;
+       while ((char*)de < base + size) {
+               next = (struct ext4_dir_entry_2 *) ((char *) de +
+                                                   le16_to_cpu(de->rec_len));
+               if (de->inode && de->name_len) {
+                       rec_len = EXT4_DIR_REC_LEN(de->name_len);
+                       if (de > to)
+                               memmove(to, de, rec_len);
+                       to->rec_len = cpu_to_le16(rec_len);
+                       prev = to;
+                       to = (struct ext4_dir_entry_2 *) (((char *) to) + rec_len);
+               }
+               de = next;
+       }
+       return prev;
+}
+
+static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
+                       struct buffer_head **bh,struct dx_frame *frame,
+                       struct dx_hash_info *hinfo, int *error)
+{
+       unsigned blocksize = dir->i_sb->s_blocksize;
+       unsigned count, continued;
+       struct buffer_head *bh2;
+       u32 newblock;
+       u32 hash2;
+       struct dx_map_entry *map;
+       char *data1 = (*bh)->b_data, *data2;
+       unsigned split;
+       struct ext4_dir_entry_2 *de = NULL, *de2;
+       int     err;
+
+       bh2 = ext4_append (handle, dir, &newblock, error);
+       if (!(bh2)) {
+               brelse(*bh);
+               *bh = NULL;
+               goto errout;
+       }
+
+       BUFFER_TRACE(*bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, *bh);
+       if (err) {
+       journal_error:
+               brelse(*bh);
+               brelse(bh2);
+               *bh = NULL;
+               ext4_std_error(dir->i_sb, err);
+               goto errout;
+       }
+       BUFFER_TRACE(frame->bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, frame->bh);
+       if (err)
+               goto journal_error;
+
+       data2 = bh2->b_data;
+
+       /* create map in the end of data2 block */
+       map = (struct dx_map_entry *) (data2 + blocksize);
+       count = dx_make_map ((struct ext4_dir_entry_2 *) data1,
+                            blocksize, hinfo, map);
+       map -= count;
+       split = count/2; // need to adjust to actual middle
+       dx_sort_map (map, count);
+       hash2 = map[split].hash;
+       continued = hash2 == map[split - 1].hash;
+       dxtrace(printk("Split block %i at %x, %i/%i\n",
+               dx_get_block(frame->at), hash2, split, count-split));
+
+       /* Fancy dance to stay within two buffers */
+       de2 = dx_move_dirents(data1, data2, map + split, count - split);
+       de = dx_pack_dirents(data1,blocksize);
+       de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
+       de2->rec_len = cpu_to_le16(data2 + blocksize - (char *) de2);
+       dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
+       dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
+
+       /* Which block gets the new entry? */
+       if (hinfo->hash >= hash2)
+       {
+               swap(*bh, bh2);
+               de = de2;
+       }
+       dx_insert_block (frame, hash2 + continued, newblock);
+       err = ext4_journal_dirty_metadata (handle, bh2);
+       if (err)
+               goto journal_error;
+       err = ext4_journal_dirty_metadata (handle, frame->bh);
+       if (err)
+               goto journal_error;
+       brelse (bh2);
+       dxtrace(dx_show_index ("frame", frame->entries));
+errout:
+       return de;
+}
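+
+/*
+ * Net effect, given the layout above: entries hashed below
+ * map[split].hash stay in *bh, the rest move to the new block, and
+ * the new index entry is keyed at hash2 + continued.  Dirent hashes
+ * keep their low bit clear, so setting it via "continued" marks a
+ * hash that straddles both blocks, which ext4_htree_next_block()
+ * recognises when deciding whether to read the next leaf.
+ */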
+#endif
+
+
+/*
+ * Add a new entry into a directory (leaf) block.  If de is non-NULL,
+ * it points to a directory entry which is guaranteed to be large
+ * enough for the new directory entry.  If de is NULL, then
+ * add_dirent_to_buf will attempt to search the directory block for
+ * space.  It will return -ENOSPC if no space is available, -EIO if
+ * the block is corrupted, and -EEXIST if the entry already exists.
+ *
+ * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
+ * all other cases bh is released.
+ */
+static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
+                            struct inode *inode, struct ext4_dir_entry_2 *de,
+                            struct buffer_head * bh)
+{
+       struct inode    *dir = dentry->d_parent->d_inode;
+       const char      *name = dentry->d_name.name;
+       int             namelen = dentry->d_name.len;
+       unsigned long   offset = 0;
+       unsigned short  reclen;
+       int             nlen, rlen, err;
+       char            *top;
+
+       reclen = EXT4_DIR_REC_LEN(namelen);
+       if (!de) {
+               de = (struct ext4_dir_entry_2 *)bh->b_data;
+               top = bh->b_data + dir->i_sb->s_blocksize - reclen;
+               while ((char *) de <= top) {
+                       if (!ext4_check_dir_entry("ext4_add_entry", dir, de,
+                                                 bh, offset)) {
+                               brelse (bh);
+                               return -EIO;
+                       }
+                       if (ext4_match (namelen, name, de)) {
+                               brelse (bh);
+                               return -EEXIST;
+                       }
+                       nlen = EXT4_DIR_REC_LEN(de->name_len);
+                       rlen = le16_to_cpu(de->rec_len);
+                       if ((de->inode? rlen - nlen: rlen) >= reclen)
+                               break;
+                       de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+                       offset += rlen;
+               }
+               if ((char *) de > top)
+                       return -ENOSPC;
+       }
+       BUFFER_TRACE(bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, bh);
+       if (err) {
+               ext4_std_error(dir->i_sb, err);
+               brelse(bh);
+               return err;
+       }
+
+       /* By now the buffer is marked for journaling */
+       nlen = EXT4_DIR_REC_LEN(de->name_len);
+       rlen = le16_to_cpu(de->rec_len);
+       if (de->inode) {
+               struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
+               de1->rec_len = cpu_to_le16(rlen - nlen);
+               de->rec_len = cpu_to_le16(nlen);
+               de = de1;
+       }
+       de->file_type = EXT4_FT_UNKNOWN;
+       if (inode) {
+               de->inode = cpu_to_le32(inode->i_ino);
+               ext4_set_de_type(dir->i_sb, de, inode->i_mode);
+       } else
+               de->inode = 0;
+       de->name_len = namelen;
+       memcpy (de->name, name, namelen);
+       /*
+        * XXX shouldn't update any times until successful
+        * completion of syscall, but too many callers depend
+        * on this.
+        *
+        * XXX similarly, too many callers depend on
+        * ext4_new_inode() setting the times, but error
+        * recovery deletes the inode, so the worst that can
+        * happen is that the times are slightly out of date
+        * and/or different from the directory change time.
+        */
+       dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
+       ext4_update_dx_flag(dir);
+       dir->i_version++;
+       ext4_mark_inode_dirty(handle, dir);
+       BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
+       err = ext4_journal_dirty_metadata(handle, bh);
+       if (err)
+               ext4_std_error(dir->i_sb, err);
+       brelse(bh);
+       return 0;
+}
+
+#ifdef CONFIG_EXT4_INDEX
+/*
+ * This converts a one block unindexed directory to a 3 block indexed
+ * directory, and adds the dentry to the indexed directory.
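+ *
+ * The resulting layout (a sketch of what the code below builds):
+ *
+ *     block 0: dx_root ("." and ".." entries followed by the index)
+ *     block 1: first half of the old dirents, after do_split
+ *     block 2: second half of the old dirents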
+ */
+static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
+                           struct inode *inode, struct buffer_head *bh)
+{
+       struct inode    *dir = dentry->d_parent->d_inode;
+       const char      *name = dentry->d_name.name;
+       int             namelen = dentry->d_name.len;
+       struct buffer_head *bh2;
+       struct dx_root  *root;
+       struct dx_frame frames[2], *frame;
+       struct dx_entry *entries;
+       struct ext4_dir_entry_2 *de, *de2;
+       char            *data1, *top;
+       unsigned        len;
+       int             retval;
+       unsigned        blocksize;
+       struct dx_hash_info hinfo;
+       u32             block;
+       struct fake_dirent *fde;
+
+       blocksize = dir->i_sb->s_blocksize;
+       dxtrace(printk("Creating index\n"));
+       retval = ext4_journal_get_write_access(handle, bh);
+       if (retval) {
+               ext4_std_error(dir->i_sb, retval);
+               brelse(bh);
+               return retval;
+       }
+       root = (struct dx_root *) bh->b_data;
+
+       bh2 = ext4_append (handle, dir, &block, &retval);
+       if (!bh2) {
+               brelse(bh);
+               return retval;
+       }
+       EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
+       data1 = bh2->b_data;
+
+       /* The 0th block becomes the root, move the dirents out */
+       fde = &root->dotdot;
+       de = (struct ext4_dir_entry_2 *)((char *)fde + le16_to_cpu(fde->rec_len));
+       len = ((char *) root) + blocksize - (char *) de;
+       memcpy (data1, de, len);
+       de = (struct ext4_dir_entry_2 *) data1;
+       top = data1 + len;
+       while ((char *)(de2 = (void *) de + le16_to_cpu(de->rec_len)) < top)
+               de = de2;
+       de->rec_len = cpu_to_le16(data1 + blocksize - (char *) de);
+       /* Initialize the root; the dot dirents already exist */
+       de = (struct ext4_dir_entry_2 *) (&root->dotdot);
+       de->rec_len = cpu_to_le16(blocksize - EXT4_DIR_REC_LEN(2));
+       memset (&root->info, 0, sizeof(root->info));
+       root->info.info_length = sizeof(root->info);
+       root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+       entries = root->entries;
+       dx_set_block (entries, 1);
+       dx_set_count (entries, 1);
+       dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
+
+       /* Initialize as for dx_probe */
+       hinfo.hash_version = root->info.hash_version;
+       hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+       ext4fs_dirhash(name, namelen, &hinfo);
+       frame = frames;
+       frame->entries = entries;
+       frame->at = entries;
+       frame->bh = bh;
+       bh = bh2;
+       de = do_split(handle, dir, &bh, frame, &hinfo, &retval);
+       dx_release (frames);
+       if (!de)
+               return retval;
+
+       return add_dirent_to_buf(handle, dentry, inode, de, bh);
+}
+#endif
+
+/*
+ *     ext4_add_entry()
+ *
+ * adds a file entry to the specified directory, using the same
+ * search semantics as ext4_find_entry().  It returns 0 on success,
+ * or a negative error code on failure.
+ *
+ * The entry's inode field is filled in from 'inode' before the block
+ * is journaled, so there is no window in which the entry exists on
+ * disk with a zero inode.
+ */
+static int ext4_add_entry (handle_t *handle, struct dentry *dentry,
+       struct inode *inode)
+{
+       struct inode *dir = dentry->d_parent->d_inode;
+       unsigned long offset;
+       struct buffer_head * bh;
+       struct ext4_dir_entry_2 *de;
+       struct super_block * sb;
+       int     retval;
+#ifdef CONFIG_EXT4_INDEX
+       int     dx_fallback=0;
+#endif
+       unsigned blocksize;
+       u32 block, blocks;
+
+       sb = dir->i_sb;
+       blocksize = sb->s_blocksize;
+       if (!dentry->d_name.len)
+               return -EINVAL;
+#ifdef CONFIG_EXT4_INDEX
+       if (is_dx(dir)) {
+               retval = ext4_dx_add_entry(handle, dentry, inode);
+               if (!retval || (retval != ERR_BAD_DX_DIR))
+                       return retval;
+               EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL;
+               dx_fallback++;
+               ext4_mark_inode_dirty(handle, dir);
+       }
+#endif
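+       /* Scan the existing directory blocks for free space */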
+       blocks = dir->i_size >> sb->s_blocksize_bits;
+       for (block = 0, offset = 0; block < blocks; block++) {
+               bh = ext4_bread(handle, dir, block, 0, &retval);
+               if(!bh)
+                       return retval;
+               retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
+               if (retval != -ENOSPC)
+                       return retval;
+
+#ifdef CONFIG_EXT4_INDEX
+               if (blocks == 1 && !dx_fallback &&
+                   EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
+                       return make_indexed_dir(handle, dentry, inode, bh);
+#endif
+               brelse(bh);
+       }
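+       /* All existing blocks are full: append a new block and use it whole */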
+       bh = ext4_append(handle, dir, &block, &retval);
+       if (!bh)
+               return retval;
+       de = (struct ext4_dir_entry_2 *) bh->b_data;
+       de->inode = 0;
+       de->rec_len = cpu_to_le16(blocksize);
+       return add_dirent_to_buf(handle, dentry, inode, de, bh);
+}
+
+#ifdef CONFIG_EXT4_INDEX
+/*
+ * Returns 0 for success, or a negative error value
+ */
+static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
+                            struct inode *inode)
+{
+       struct dx_frame frames[2], *frame;
+       struct dx_entry *entries, *at;
+       struct dx_hash_info hinfo;
+       struct buffer_head * bh;
+       struct inode *dir = dentry->d_parent->d_inode;
+       struct super_block * sb = dir->i_sb;
+       struct ext4_dir_entry_2 *de;
+       int err;
+
+       frame = dx_probe(dentry, NULL, &hinfo, frames, &err);
+       if (!frame)
+               return err;
+       entries = frame->entries;
+       at = frame->at;
+
+       if (!(bh = ext4_bread(handle, dir, dx_get_block(frame->at), 0, &err)))
+               goto cleanup;
+
+       BUFFER_TRACE(bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, bh);
+       if (err)
+               goto journal_error;
+
+       err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
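+       /* On anything but -ENOSPC, add_dirent_to_buf has already released bh */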
+       if (err != -ENOSPC) {
+               bh = NULL;
+               goto cleanup;
+       }
+
+       /* Block full, should compress but for now just split */
+       dxtrace(printk("using %u of %u node entries\n",
+                      dx_get_count(entries), dx_get_limit(entries)));
+       /* Need to split index? */
+       if (dx_get_count(entries) == dx_get_limit(entries)) {
+               u32 newblock;
+               unsigned icount = dx_get_count(entries);
+               int levels = frame - frames;
+               struct dx_entry *entries2;
+               struct dx_node *node2;
+               struct buffer_head *bh2;
+
+               if (levels && (dx_get_count(frames->entries) ==
+                              dx_get_limit(frames->entries))) {
+                       ext4_warning(sb, __FUNCTION__,
+                                    "Directory index full!");
+                       err = -ENOSPC;
+                       goto cleanup;
+               }
+               bh2 = ext4_append (handle, dir, &newblock, &err);
+               if (!bh2)
+                       goto cleanup;
+               node2 = (struct dx_node *)(bh2->b_data);
+               entries2 = node2->entries;
+               node2->fake.rec_len = cpu_to_le16(sb->s_blocksize);
+               node2->fake.inode = 0;
+               BUFFER_TRACE(frame->bh, "get_write_access");
+               err = ext4_journal_get_write_access(handle, frame->bh);
+               if (err)
+                       goto journal_error;
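+               /*
+                * levels != 0: the full node is an index block below the
+                * root, so split its entries between two nodes.  Otherwise
+                * the root itself is full: copy its entries into a new
+                * node and grow a second level beneath the root.
+                */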
+               if (levels) {
+                       unsigned icount1 = icount/2, icount2 = icount - icount1;
+                       unsigned hash2 = dx_get_hash(entries + icount1);
+                       dxtrace(printk("Split index %i/%i\n", icount1, icount2));
+
+                       BUFFER_TRACE(frames[0].bh, "get_write_access"); /* index root */
+                       err = ext4_journal_get_write_access(handle,
+                                                            frames[0].bh);
+                       if (err)
+                               goto journal_error;
+
+                       memcpy ((char *) entries2, (char *) (entries + icount1),
+                               icount2 * sizeof(struct dx_entry));
+                       dx_set_count (entries, icount1);
+                       dx_set_count (entries2, icount2);
+                       dx_set_limit (entries2, dx_node_limit(dir));
+
+                       /* Which index block gets the new entry? */
+                       if (at - entries >= icount1) {
+                               frame->at = at = at - entries - icount1 + entries2;
+                               frame->entries = entries = entries2;
+                               swap(frame->bh, bh2);
+                       }
+                       dx_insert_block (frames + 0, hash2, newblock);
+                       dxtrace(dx_show_index ("node", frames[1].entries));
+                       dxtrace(dx_show_index ("node",
+                              ((struct dx_node *) bh2->b_data)->entries));
+                       err = ext4_journal_dirty_metadata(handle, bh2);
+                       if (err)
+                               goto journal_error;
+                       brelse (bh2);
+               } else {
+                       dxtrace(printk("Creating second level index...\n"));
+                       memcpy((char *) entries2, (char *) entries,
+                              icount * sizeof(struct dx_entry));
+                       dx_set_limit(entries2, dx_node_limit(dir));
+
+                       /* Set up root */
+                       dx_set_count(entries, 1);
+                       dx_set_block(entries + 0, newblock);
+                       ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
+
+                       /* Add new access path frame */
+                       frame = frames + 1;
+                       frame->at = at = at - entries + entries2;
+                       frame->entries = entries = entries2;
+                       frame->bh = bh2;
+                       err = ext4_journal_get_write_access(handle,
+                                                            frame->bh);
+                       if (err)
+                               goto journal_error;
+               }
+               err = ext4_journal_dirty_metadata(handle, frames[0].bh);
+               if (err)
+                       goto journal_error;
+       }
+       de = do_split(handle, dir, &bh, frame, &hinfo, &err);
+       if (!de)
+               goto cleanup;
+       err = add_dirent_to_buf(handle, dentry, inode, de, bh);
+       bh = NULL;
+       goto cleanup;
+
+journal_error:
+       ext4_std_error(dir->i_sb, err);
+cleanup:
+       if (bh)
+               brelse(bh);
+       dx_release(frames);
+       return err;
+}
+#endif
+
+/*
+ * ext4_delete_entry deletes a directory entry by merging it with the
+ * previous entry.  If the victim is the first entry in its block, it
+ * is instead marked unused by clearing its inode field.
+ */
+static int ext4_delete_entry (handle_t *handle,
+                             struct inode * dir,
+                             struct ext4_dir_entry_2 * de_del,
+                             struct buffer_head * bh)
+{
+       struct ext4_dir_entry_2 * de, * pde;
+       int i;
+
+       i = 0;
+       pde = NULL;
+       de = (struct ext4_dir_entry_2 *) bh->b_data;
+       while (i < bh->b_size) {
+               if (!ext4_check_dir_entry("ext4_delete_entry", dir, de, bh, i))
+                       return -EIO;
+               if (de == de_del)  {
+                       BUFFER_TRACE(bh, "get_write_access");
+                       ext4_journal_get_write_access(handle, bh);
+                       if (pde)
+                               pde->rec_len =
+                                       cpu_to_le16(le16_to_cpu(pde->rec_len) +
+                                                   le16_to_cpu(de->rec_len));
+                       else
+                               de->inode = 0;
+                       dir->i_version++;
+                       BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
+                       ext4_journal_dirty_metadata(handle, bh);
+                       return 0;
+               }
+               i += le16_to_cpu(de->rec_len);
+               pde = de;
+               de = (struct ext4_dir_entry_2 *)
+                       ((char *) de + le16_to_cpu(de->rec_len));
+       }
+       return -ENOENT;
+}
+
+/*
+ * ext4_mark_inode_dirty is somewhat expensive, so unlike ext2 we
+ * do not perform it in these functions.  We perform it at the call site,
+ * if it is needed.
+ */
+static inline void ext4_inc_count(handle_t *handle, struct inode *inode)
+{
+       inc_nlink(inode);
+}
+
+static inline void ext4_dec_count(handle_t *handle, struct inode *inode)
+{
+       drop_nlink(inode);
+}
+
+static int ext4_add_nondir(handle_t *handle,
+               struct dentry *dentry, struct inode *inode)
+{
+       int err = ext4_add_entry(handle, dentry, inode);
+       if (!err) {
+               ext4_mark_inode_dirty(handle, inode);
+               d_instantiate(dentry, inode);
+               return 0;
+       }
+       ext4_dec_count(handle, inode);
+       iput(inode);
+       return err;
+}
+
+/*
+ * By the time this is called, we already have created
+ * the directory cache entry for the new file, but it
+ * is so far negative - it has no inode.
+ *
+ * If the create succeeds, we fill in the inode information
+ * with d_instantiate().
+ */
+static int ext4_create (struct inode * dir, struct dentry * dentry, int mode,
+               struct nameidata *nd)
+{
+       handle_t *handle;
+       struct inode * inode;
+       int err, retries = 0;
+
+retry:
+       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+                                       2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+
+       inode = ext4_new_inode (handle, dir, mode);
+       err = PTR_ERR(inode);
+       if (!IS_ERR(inode)) {
+               inode->i_op = &ext4_file_inode_operations;
+               inode->i_fop = &ext4_file_operations;
+               ext4_set_aops(inode);
+               err = ext4_add_nondir(handle, dentry, inode);
+       }
+       ext4_journal_stop(handle);
+       if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
+               goto retry;
+       return err;
+}
+
+static int ext4_mknod (struct inode * dir, struct dentry *dentry,
+                       int mode, dev_t rdev)
+{
+       handle_t *handle;
+       struct inode *inode;
+       int err, retries = 0;
+
+       if (!new_valid_dev(rdev))
+               return -EINVAL;
+
+retry:
+       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+                                       2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+
+       inode = ext4_new_inode (handle, dir, mode);
+       err = PTR_ERR(inode);
+       if (!IS_ERR(inode)) {
+               init_special_inode(inode, inode->i_mode, rdev);
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+               inode->i_op = &ext4_special_inode_operations;
+#endif
+               err = ext4_add_nondir(handle, dentry, inode);
+       }
+       ext4_journal_stop(handle);
+       if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
+               goto retry;
+       return err;
+}
+
+static int ext4_mkdir(struct inode * dir, struct dentry * dentry, int mode)
+{
+       handle_t *handle;
+       struct inode * inode;
+       struct buffer_head * dir_block;
+       struct ext4_dir_entry_2 * de;
+       int err, retries = 0;
+
+       if (dir->i_nlink >= EXT4_LINK_MAX)
+               return -EMLINK;
+
+retry:
+       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+                                       2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+
+       inode = ext4_new_inode (handle, dir, S_IFDIR | mode);
+       err = PTR_ERR(inode);
+       if (IS_ERR(inode))
+               goto out_stop;
+
+       inode->i_op = &ext4_dir_inode_operations;
+       inode->i_fop = &ext4_dir_operations;
+       inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+       dir_block = ext4_bread (handle, inode, 0, 1, &err);
+       if (!dir_block) {
+               drop_nlink(inode); /* is this nlink == 0? */
+               ext4_mark_inode_dirty(handle, inode);
+               iput (inode);
+               goto out_stop;
+       }
+       BUFFER_TRACE(dir_block, "get_write_access");
+       ext4_journal_get_write_access(handle, dir_block);
+       de = (struct ext4_dir_entry_2 *) dir_block->b_data;
+       de->inode = cpu_to_le32(inode->i_ino);
+       de->name_len = 1;
+       de->rec_len = cpu_to_le16(EXT4_DIR_REC_LEN(de->name_len));
+       strcpy (de->name, ".");
+       ext4_set_de_type(dir->i_sb, de, S_IFDIR);
+       de = (struct ext4_dir_entry_2 *)
+                       ((char *) de + le16_to_cpu(de->rec_len));
+       de->inode = cpu_to_le32(dir->i_ino);
+       de->rec_len = cpu_to_le16(inode->i_sb->s_blocksize-EXT4_DIR_REC_LEN(1));
+       de->name_len = 2;
+       strcpy (de->name, "..");
+       ext4_set_de_type(dir->i_sb, de, S_IFDIR);
+       inode->i_nlink = 2;
+       BUFFER_TRACE(dir_block, "call ext4_journal_dirty_metadata");
+       ext4_journal_dirty_metadata(handle, dir_block);
+       brelse (dir_block);
+       ext4_mark_inode_dirty(handle, inode);
+       err = ext4_add_entry (handle, dentry, inode);
+       if (err) {
+               inode->i_nlink = 0;
+               ext4_mark_inode_dirty(handle, inode);
+               iput (inode);
+               goto out_stop;
+       }
+       inc_nlink(dir);
+       ext4_update_dx_flag(dir);
+       ext4_mark_inode_dirty(handle, dir);
+       d_instantiate(dentry, inode);
+out_stop:
+       ext4_journal_stop(handle);
+       if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
+               goto retry;
+       return err;
+}
+
+/*
+ * routine to check that the specified directory is empty (for rmdir)
+ */
+static int empty_dir (struct inode * inode)
+{
+       unsigned long offset;
+       struct buffer_head * bh;
+       struct ext4_dir_entry_2 * de, * de1;
+       struct super_block * sb;
+       int err = 0;
+
+       sb = inode->i_sb;
+       if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
+           !(bh = ext4_bread (NULL, inode, 0, 0, &err))) {
+               if (err)
+                       ext4_error(inode->i_sb, __FUNCTION__,
+                                  "error %d reading directory #%lu offset 0",
+                                  err, inode->i_ino);
+               else
+                       ext4_warning(inode->i_sb, __FUNCTION__,
+                                    "bad directory (dir #%lu) - no data block",
+                                    inode->i_ino);
+               return 1;
+       }
+       de = (struct ext4_dir_entry_2 *) bh->b_data;
+       de1 = (struct ext4_dir_entry_2 *)
+                       ((char *) de + le16_to_cpu(de->rec_len));
+       if (le32_to_cpu(de->inode) != inode->i_ino ||
+                       !le32_to_cpu(de1->inode) ||
+                       strcmp (".", de->name) ||
+                       strcmp ("..", de1->name)) {
+               ext4_warning (inode->i_sb, "empty_dir",
+                             "bad directory (dir #%lu) - no `.' or `..'",
+                             inode->i_ino);
+               brelse (bh);
+               return 1;
+       }
+       offset = le16_to_cpu(de->rec_len) + le16_to_cpu(de1->rec_len);
+       de = (struct ext4_dir_entry_2 *)
+                       ((char *) de1 + le16_to_cpu(de1->rec_len));
+       while (offset < inode->i_size) {
+               if (!bh ||
+                       (void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
+                       err = 0;
+                       brelse (bh);
+                       bh = ext4_bread (NULL, inode,
+                               offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
+                       if (!bh) {
+                               if (err)
+                                       ext4_error(sb, __FUNCTION__,
+                                                  "error %d reading directory"
+                                                  " #%lu offset %lu",
+                                                  err, inode->i_ino, offset);
+                               offset += sb->s_blocksize;
+                               continue;
+                       }
+                       de = (struct ext4_dir_entry_2 *) bh->b_data;
+               }
+               if (!ext4_check_dir_entry("empty_dir", inode, de, bh, offset)) {
+                       de = (struct ext4_dir_entry_2 *)(bh->b_data +
+                                                        sb->s_blocksize);
+                       offset = (offset | (sb->s_blocksize - 1)) + 1;
+                       continue;
+               }
+               if (le32_to_cpu(de->inode)) {
+                       brelse (bh);
+                       return 0;
+               }
+               offset += le16_to_cpu(de->rec_len);
+               de = (struct ext4_dir_entry_2 *)
+                               ((char *) de + le16_to_cpu(de->rec_len));
+       }
+       brelse (bh);
+       return 1;
+}
+
+/* ext4_orphan_add() links an unlinked or truncated inode into a list of
+ * such inodes, starting at the superblock, in case we crash before the
+ * file is closed/deleted, or in case the inode truncate spans multiple
+ * transactions and the last transaction is not recovered after a crash.
+ *
+ * At filesystem recovery time, we walk this list deleting unlinked
+ * inodes and truncating linked inodes in ext4_orphan_cleanup().
+ */
+int ext4_orphan_add(handle_t *handle, struct inode *inode)
+{
+       struct super_block *sb = inode->i_sb;
+       struct ext4_iloc iloc;
+       int err = 0, rc;
+
+       lock_super(sb);
+       if (!list_empty(&EXT4_I(inode)->i_orphan))
+               goto out_unlock;
+
+       /* Orphan handling is only valid for files with data blocks
+        * being truncated, or files being unlinked. */
+
+       /* @@@ FIXME: Observation from aviro:
+        * I think I can trigger J_ASSERT in ext4_orphan_add().  We block
+        * here (on lock_super()), so we can race with ext4_link(), which
+        * might bump ->i_nlink for, say, a character device: not a regular
+        * file, not a directory, not a symlink, and ->i_nlink > 0.
+        */
+       J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+               S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
+
+       BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+       if (err)
+               goto out_unlock;
+
+       err = ext4_reserve_inode_write(handle, inode, &iloc);
+       if (err)
+               goto out_unlock;
+
+       /* Insert this inode at the head of the on-disk orphan list... */
+       NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
+       EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+       err = ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+       rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
+       if (!err)
+               err = rc;
+
+       /* Only add to the head of the in-memory list if all the
+        * previous operations succeeded.  If the orphan_add is going to
+        * fail (possibly taking the journal offline), we can't risk
+        * leaving the inode on the orphan list: stray orphan-list
+        * entries can cause panics at unmount time.
+        *
+        * This is safe: on error we're going to ignore the orphan list
+        * anyway on the next recovery. */
+       if (!err)
+               list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
+
+       jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
+       jbd_debug(4, "orphan inode %lu will point to %d\n",
+                       inode->i_ino, NEXT_ORPHAN(inode));
+out_unlock:
+       unlock_super(sb);
+       ext4_std_error(inode->i_sb, err);
+       return err;
+}
+
+/*
+ * ext4_orphan_del() removes an unlinked or truncated inode from the list
+ * of such inodes stored on disk, because it is finally being cleaned up.
+ */
+int ext4_orphan_del(handle_t *handle, struct inode *inode)
+{
+       struct list_head *prev;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_sb_info *sbi;
+       unsigned long ino_next;
+       struct ext4_iloc iloc;
+       int err = 0;
+
+       lock_super(inode->i_sb);
+       if (list_empty(&ei->i_orphan)) {
+               unlock_super(inode->i_sb);
+               return 0;
+       }
+
+       ino_next = NEXT_ORPHAN(inode);
+       prev = ei->i_orphan.prev;
+       sbi = EXT4_SB(inode->i_sb);
+
+       jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
+
+       list_del_init(&ei->i_orphan);
+
+       /* If we're on an error path, we may not have a valid
+        * transaction handle with which to update the orphan list on
+        * disk, but we still need to remove the inode from the linked
+        * list in memory. */
+       if (!handle)
+               goto out;
+
+       err = ext4_reserve_inode_write(handle, inode, &iloc);
+       if (err)
+               goto out_err;
+
+       if (prev == &sbi->s_orphan) {
+               jbd_debug(4, "superblock will point to %lu\n", ino_next);
+               BUFFER_TRACE(sbi->s_sbh, "get_write_access");
+               err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+               if (err)
+                       goto out_brelse;
+               sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
+               err = ext4_journal_dirty_metadata(handle, sbi->s_sbh);
+       } else {
+               struct ext4_iloc iloc2;
+               struct inode *i_prev =
+                       &list_entry(prev, struct ext4_inode_info, i_orphan)->vfs_inode;
+
+               jbd_debug(4, "orphan inode %lu will point to %lu\n",
+                         i_prev->i_ino, ino_next);
+               err = ext4_reserve_inode_write(handle, i_prev, &iloc2);
+               if (err)
+                       goto out_brelse;
+               NEXT_ORPHAN(i_prev) = ino_next;
+               err = ext4_mark_iloc_dirty(handle, i_prev, &iloc2);
+       }
+       if (err)
+               goto out_brelse;
+       NEXT_ORPHAN(inode) = 0;
+       err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+
+out_err:
+       ext4_std_error(inode->i_sb, err);
+out:
+       unlock_super(inode->i_sb);
+       return err;
+
+out_brelse:
+       brelse(iloc.bh);
+       goto out_err;
+}
+
+static int ext4_rmdir (struct inode * dir, struct dentry *dentry)
+{
+       int retval;
+       struct inode * inode;
+       struct buffer_head * bh;
+       struct ext4_dir_entry_2 * de;
+       handle_t *handle;
+
+       /* Initialize quotas before so that eventual writes go in
+        * separate transaction */
+       DQUOT_INIT(dentry->d_inode);
+       handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       retval = -ENOENT;
+       bh = ext4_find_entry (dentry, &de);
+       if (!bh)
+               goto end_rmdir;
+
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+
+       inode = dentry->d_inode;
+
+       retval = -EIO;
+       if (le32_to_cpu(de->inode) != inode->i_ino)
+               goto end_rmdir;
+
+       retval = -ENOTEMPTY;
+       if (!empty_dir (inode))
+               goto end_rmdir;
+
+       retval = ext4_delete_entry(handle, dir, de, bh);
+       if (retval)
+               goto end_rmdir;
+       if (inode->i_nlink != 2)
+               ext4_warning (inode->i_sb, "ext4_rmdir",
+                             "empty directory has nlink!=2 (%d)",
+                             inode->i_nlink);
+       inode->i_version++;
+       clear_nlink(inode);
+       /* There's no need to set i_disksize: the fact that i_nlink is
+        * zero will ensure that the right thing happens during any
+        * recovery. */
+       inode->i_size = 0;
+       ext4_orphan_add(handle, inode);
+       inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+       ext4_mark_inode_dirty(handle, inode);
+       drop_nlink(dir);
+       ext4_update_dx_flag(dir);
+       ext4_mark_inode_dirty(handle, dir);
+
+end_rmdir:
+       ext4_journal_stop(handle);
+       brelse (bh);
+       return retval;
+}
+
+static int ext4_unlink(struct inode * dir, struct dentry *dentry)
+{
+       int retval;
+       struct inode * inode;
+       struct buffer_head * bh;
+       struct ext4_dir_entry_2 * de;
+       handle_t *handle;
+
+       /* Initialize quotas before so that eventual writes go
+        * in separate transaction */
+       DQUOT_INIT(dentry->d_inode);
+       handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+
+       retval = -ENOENT;
+       bh = ext4_find_entry (dentry, &de);
+       if (!bh)
+               goto end_unlink;
+
+       inode = dentry->d_inode;
+
+       retval = -EIO;
+       if (le32_to_cpu(de->inode) != inode->i_ino)
+               goto end_unlink;
+
+       if (!inode->i_nlink) {
+               ext4_warning (inode->i_sb, "ext4_unlink",
+                             "Deleting nonexistent file (%lu), %d",
+                             inode->i_ino, inode->i_nlink);
+               inode->i_nlink = 1;
+       }
+       retval = ext4_delete_entry(handle, dir, de, bh);
+       if (retval)
+               goto end_unlink;
+       dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+       ext4_update_dx_flag(dir);
+       ext4_mark_inode_dirty(handle, dir);
+       drop_nlink(inode);
+       if (!inode->i_nlink)
+               ext4_orphan_add(handle, inode);
+       inode->i_ctime = dir->i_ctime;
+       ext4_mark_inode_dirty(handle, inode);
+       retval = 0;
+
+end_unlink:
+       ext4_journal_stop(handle);
+       brelse (bh);
+       return retval;
+}
+
+static int ext4_symlink (struct inode * dir,
+               struct dentry *dentry, const char * symname)
+{
+       handle_t *handle;
+       struct inode * inode;
+       int l, err, retries = 0;
+
+       l = strlen(symname)+1;
+       if (l > dir->i_sb->s_blocksize)
+               return -ENAMETOOLONG;
+
+retry:
+       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
+                                       2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+
+       inode = ext4_new_inode (handle, dir, S_IFLNK|S_IRWXUGO);
+       err = PTR_ERR(inode);
+       if (IS_ERR(inode))
+               goto out_stop;
+
+       if (l > sizeof (EXT4_I(inode)->i_data)) {
+               inode->i_op = &ext4_symlink_inode_operations;
+               ext4_set_aops(inode);
+               /*
+                * page_symlink() calls into ext4_prepare/commit_write.
+                * We have a transaction open.  All is sweetness.  It also sets
+                * i_size in generic_commit_write().
+                */
+               err = __page_symlink(inode, symname, l,
+                               mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+               if (err) {
+                       ext4_dec_count(handle, inode);
+                       ext4_mark_inode_dirty(handle, inode);
+                       iput (inode);
+                       goto out_stop;
+               }
+       } else {
+               inode->i_op = &ext4_fast_symlink_inode_operations;
+               memcpy((char *) &EXT4_I(inode)->i_data, symname, l);
+               inode->i_size = l-1;
+       }
+       EXT4_I(inode)->i_disksize = inode->i_size;
+       err = ext4_add_nondir(handle, dentry, inode);
+out_stop:
+       ext4_journal_stop(handle);
+       if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
+               goto retry;
+       return err;
+}
+
+static int ext4_link (struct dentry * old_dentry,
+               struct inode * dir, struct dentry *dentry)
+{
+       handle_t *handle;
+       struct inode *inode = old_dentry->d_inode;
+       int err, retries = 0;
+
+       if (inode->i_nlink >= EXT4_LINK_MAX)
+               return -EMLINK;
+
+retry:
+       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(dir))
+               handle->h_sync = 1;
+
+       inode->i_ctime = CURRENT_TIME_SEC;
+       ext4_inc_count(handle, inode);
+       atomic_inc(&inode->i_count);
+
+       err = ext4_add_nondir(handle, dentry, inode);
+       ext4_journal_stop(handle);
+       if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
+               goto retry;
+       return err;
+}
+
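+/*
+ * The "." entry is always first in a directory block, so stepping over
+ * its rec_len lands on "..": PARENT_INO() yields the parent inode field.
+ */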
+#define PARENT_INO(buffer) \
+       ((struct ext4_dir_entry_2 *) ((char *) buffer + \
+       le16_to_cpu(((struct ext4_dir_entry_2 *) buffer)->rec_len)))->inode
+
+/*
+ * Anybody can rename anything with this: the permission checks are left to the
+ * higher-level routines.
+ */
+static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
+                          struct inode * new_dir,struct dentry *new_dentry)
+{
+       handle_t *handle;
+       struct inode * old_inode, * new_inode;
+       struct buffer_head * old_bh, * new_bh, * dir_bh;
+       struct ext4_dir_entry_2 * old_de, * new_de;
+       int retval;
+
+       old_bh = new_bh = dir_bh = NULL;
+
+       /* Initialize quotas before so that eventual writes go
+        * in separate transaction */
+       if (new_dentry->d_inode)
+               DQUOT_INIT(new_dentry->d_inode);
+       handle = ext4_journal_start(old_dir, 2 *
+                                       EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
+                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+               handle->h_sync = 1;
+
+       old_bh = ext4_find_entry (old_dentry, &old_de);
+       /*
+        *  The inode number check is _not_ there to guard against IO
+        *  errors.  We might rmdir the source, keep it as the pwd of some
+        *  process, and merrily kill the link to whatever was created
+        *  under the same name.  Goodbye sticky bit ;-<
+        */
+       old_inode = old_dentry->d_inode;
+       retval = -ENOENT;
+       if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
+               goto end_rename;
+
+       new_inode = new_dentry->d_inode;
+       new_bh = ext4_find_entry (new_dentry, &new_de);
+       if (new_bh) {
+               if (!new_inode) {
+                       brelse (new_bh);
+                       new_bh = NULL;
+               }
+       }
+       if (S_ISDIR(old_inode->i_mode)) {
+               if (new_inode) {
+                       retval = -ENOTEMPTY;
+                       if (!empty_dir (new_inode))
+                               goto end_rename;
+               }
+               retval = -EIO;
+               dir_bh = ext4_bread (handle, old_inode, 0, 0, &retval);
+               if (!dir_bh)
+                       goto end_rename;
+               if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
+                       goto end_rename;
+               retval = -EMLINK;
+               if (!new_inode && new_dir!=old_dir &&
+                               new_dir->i_nlink >= EXT4_LINK_MAX)
+                       goto end_rename;
+       }
+       if (!new_bh) {
+               retval = ext4_add_entry (handle, new_dentry, old_inode);
+               if (retval)
+                       goto end_rename;
+       } else {
+               BUFFER_TRACE(new_bh, "get write access");
+               ext4_journal_get_write_access(handle, new_bh);
+               new_de->inode = cpu_to_le32(old_inode->i_ino);
+               if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
+                                             EXT4_FEATURE_INCOMPAT_FILETYPE))
+                       new_de->file_type = old_de->file_type;
+               new_dir->i_version++;
+               BUFFER_TRACE(new_bh, "call ext4_journal_dirty_metadata");
+               ext4_journal_dirty_metadata(handle, new_bh);
+               brelse(new_bh);
+               new_bh = NULL;
+       }
+
+       /*
+        * Like most other Unix systems, set the ctime for inodes on a
+        * rename.
+        */
+       old_inode->i_ctime = CURRENT_TIME_SEC;
+       ext4_mark_inode_dirty(handle, old_inode);
+
+       /*
+        * ok, that's it
+        */
+       if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
+           old_de->name_len != old_dentry->d_name.len ||
+           strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
+           (retval = ext4_delete_entry(handle, old_dir,
+                                       old_de, old_bh)) == -ENOENT) {
+               /* old_de could have moved from under us during htree split, so
+                * make sure that we are deleting the right entry.  We might
+                * also be pointing to a stale entry in the unused part of
+                * old_bh so just checking inum and the name isn't enough. */
+               struct buffer_head *old_bh2;
+               struct ext4_dir_entry_2 *old_de2;
+
+               old_bh2 = ext4_find_entry(old_dentry, &old_de2);
+               if (old_bh2) {
+                       retval = ext4_delete_entry(handle, old_dir,
+                                                  old_de2, old_bh2);
+                       brelse(old_bh2);
+               }
+       }
+       if (retval) {
+               ext4_warning(old_dir->i_sb, "ext4_rename",
+                               "Deleting old file (%lu), %d, error=%d",
+                               old_dir->i_ino, old_dir->i_nlink, retval);
+       }
+
+       if (new_inode) {
+               drop_nlink(new_inode);
+               new_inode->i_ctime = CURRENT_TIME_SEC;
+       }
+       old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
+       ext4_update_dx_flag(old_dir);
+       if (dir_bh) {
+               BUFFER_TRACE(dir_bh, "get_write_access");
+               ext4_journal_get_write_access(handle, dir_bh);
+               PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
+               BUFFER_TRACE(dir_bh, "call ext4_journal_dirty_metadata");
+               ext4_journal_dirty_metadata(handle, dir_bh);
+               drop_nlink(old_dir);
+               if (new_inode) {
+                       drop_nlink(new_inode);
+               } else {
+                       inc_nlink(new_dir);
+                       ext4_update_dx_flag(new_dir);
+                       ext4_mark_inode_dirty(handle, new_dir);
+               }
+       }
+       ext4_mark_inode_dirty(handle, old_dir);
+       if (new_inode) {
+               ext4_mark_inode_dirty(handle, new_inode);
+               if (!new_inode->i_nlink)
+                       ext4_orphan_add(handle, new_inode);
+       }
+       retval = 0;
+
+end_rename:
+       brelse (dir_bh);
+       brelse (old_bh);
+       brelse (new_bh);
+       ext4_journal_stop(handle);
+       return retval;
+}
+
+/*
+ * directories can handle most operations...
+ */
+struct inode_operations ext4_dir_inode_operations = {
+       .create         = ext4_create,
+       .lookup         = ext4_lookup,
+       .link           = ext4_link,
+       .unlink         = ext4_unlink,
+       .symlink        = ext4_symlink,
+       .mkdir          = ext4_mkdir,
+       .rmdir          = ext4_rmdir,
+       .mknod          = ext4_mknod,
+       .rename         = ext4_rename,
+       .setattr        = ext4_setattr,
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
+       .listxattr      = ext4_listxattr,
+       .removexattr    = generic_removexattr,
+#endif
+       .permission     = ext4_permission,
+};
+
+struct inode_operations ext4_special_inode_operations = {
+       .setattr        = ext4_setattr,
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
+       .listxattr      = ext4_listxattr,
+       .removexattr    = generic_removexattr,
+#endif
+       .permission     = ext4_permission,
+};
diff --git a/fs/ext4/namei.h b/fs/ext4/namei.h
new file mode 100644 (file)
index 0000000..5e4dfff
--- /dev/null
@@ -0,0 +1,8 @@
+/*  linux/fs/ext4/namei.h
+ *
+ * Copyright (C) 2005 Simtec Electronics
+ *     Ben Dooks <ben@simtec.co.uk>
+ *
+ */
+
+extern struct dentry *ext4_get_parent(struct dentry *child);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
new file mode 100644 (file)
index 0000000..1e95780
--- /dev/null
@@ -0,0 +1,1045 @@
+/*
+ *  linux/fs/ext4/resize.c
+ *
+ * Support for resizing an ext4 filesystem while it is mounted.
+ *
+ * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
+ *
+ * This could probably be made into a module, because it is not often used.
+ */
+
+
+#define EXT4FS_DEBUG
+
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/ext4_jbd2.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+
+#define outside(b, first, last)        ((b) < (first) || (b) >= (last))
+#define inside(b, first, last) ((b) >= (first) && (b) < (last))
+
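+/*
+ * Sanity-check the layout the caller proposes for a new group: its
+ * bitmaps and inode table must lie inside the group and must not
+ * overlap one another or the group's superblock/GDT backup blocks.
+ */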
+static int verify_group_input(struct super_block *sb,
+                             struct ext4_new_group_data *input)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       ext4_fsblk_t start = ext4_blocks_count(es);
+       ext4_fsblk_t end = start + input->blocks_count;
+       unsigned group = input->group;
+       ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
+       unsigned overhead = ext4_bg_has_super(sb, group) ?
+               (1 + ext4_bg_num_gdb(sb, group) +
+                le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
+       ext4_fsblk_t metaend = start + overhead;
+       struct buffer_head *bh = NULL;
+       ext4_grpblk_t free_blocks_count, offset;
+       int err = -EINVAL;
+
+       input->free_blocks_count = free_blocks_count =
+               input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
+
+       if (test_opt(sb, DEBUG))
+               printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
+                      "(%d free, %u reserved)\n",
+                      ext4_bg_has_super(sb, input->group) ? "normal" :
+                      "no-super", input->group, input->blocks_count,
+                      free_blocks_count, input->reserved_blocks);
+
+       ext4_get_group_no_and_offset(sb, start, NULL, &offset);
+       if (group != sbi->s_groups_count)
+               ext4_warning(sb, __FUNCTION__,
+                            "Cannot add at group %u (only %lu groups)",
+                            input->group, sbi->s_groups_count);
+       else if (offset != 0)
+               ext4_warning(sb, __FUNCTION__, "Last group not full");
+       else if (input->reserved_blocks > input->blocks_count / 5)
+               ext4_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
+                            input->reserved_blocks);
+       else if (free_blocks_count < 0)
+               ext4_warning(sb, __FUNCTION__, "Bad blocks count %u",
+                            input->blocks_count);
+       else if (!(bh = sb_bread(sb, end - 1)))
+               ext4_warning(sb, __FUNCTION__,
+                            "Cannot read last block (%llu)",
+                            end - 1);
+       else if (outside(input->block_bitmap, start, end))
+               ext4_warning(sb, __FUNCTION__,
+                            "Block bitmap not in group (block %llu)",
+                            input->block_bitmap);
+       else if (outside(input->inode_bitmap, start, end))
+               ext4_warning(sb, __FUNCTION__,
+                            "Inode bitmap not in group (block %llu)",
+                            input->inode_bitmap);
+       else if (outside(input->inode_table, start, end) ||
+                outside(itend - 1, start, end))
+               ext4_warning(sb, __FUNCTION__,
+                            "Inode table not in group (blocks %llu-%llu)",
+                            input->inode_table, itend - 1);
+       else if (input->inode_bitmap == input->block_bitmap)
+               ext4_warning(sb, __FUNCTION__,
+                            "Block bitmap same as inode bitmap (%llu)",
+                            input->block_bitmap);
+       else if (inside(input->block_bitmap, input->inode_table, itend))
+               ext4_warning(sb, __FUNCTION__,
+                            "Block bitmap (%llu) in inode table (%llu-%llu)",
+                            input->block_bitmap, input->inode_table, itend-1);
+       else if (inside(input->inode_bitmap, input->inode_table, itend))
+               ext4_warning(sb, __FUNCTION__,
+                            "Inode bitmap (%llu) in inode table (%llu-%llu)",
+                            input->inode_bitmap, input->inode_table, itend-1);
+       else if (inside(input->block_bitmap, start, metaend))
+               ext4_warning(sb, __FUNCTION__,
+                            "Block bitmap (%llu) in GDT table"
+                            " (%llu-%llu)",
+                            input->block_bitmap, start, metaend - 1);
+       else if (inside(input->inode_bitmap, start, metaend))
+               ext4_warning(sb, __FUNCTION__,
+                            "Inode bitmap (%llu) in GDT table"
+                            " (%llu-%llu)",
+                            input->inode_bitmap, start, metaend - 1);
+       else if (inside(input->inode_table, start, metaend) ||
+                inside(itend - 1, start, metaend))
+               ext4_warning(sb, __FUNCTION__,
+                            "Inode table (%llu-%llu) overlaps"
+                            "GDT table (%llu-%llu)",
+                            input->inode_table, itend - 1, start, metaend - 1);
+       else
+               err = 0;
+       brelse(bh);
+
+       return err;
+}
+
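+/*
+ * Get block @blk journaled for write, zero it, and mark it uptodate;
+ * returns the buffer head, or an ERR_PTR on failure.
+ */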
+static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
+                                 ext4_fsblk_t blk)
+{
+       struct buffer_head *bh;
+       int err;
+
+       bh = sb_getblk(sb, blk);
+       if (!bh)
+               return ERR_PTR(-EIO);
+       if ((err = ext4_journal_get_write_access(handle, bh))) {
+               brelse(bh);
+               bh = ERR_PTR(err);
+       } else {
+               lock_buffer(bh);
+               memset(bh->b_data, 0, sb->s_blocksize);
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+       }
+
+       return bh;
+}
+
+/*
+ * To avoid calling the atomic setbit hundreds or thousands of times, we only
+ * need to use it within a single byte (to ensure we get endianness right).
+ * We can use memset for the rest of the bitmap as there are no other users.
+ */
+static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
+{
+       int i;
+
+       if (start_bit >= end_bit)
+               return;
+
+       ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
+       for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
+               ext4_set_bit(i, bitmap);
+       if (i < end_bit)
+               memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
+}
+
+/*
+ * Set up the block and inode bitmaps, and the inode table for the new group.
+ * This doesn't need to be part of the main transaction, since we are only
+ * changing blocks outside the actual filesystem.  We still do journaling to
+ * ensure the recovery is correct in case of a failure just after resize.
+ * If any part of this fails, we simply abort the resize.
+ */
+static int setup_new_group_blocks(struct super_block *sb,
+                                 struct ext4_new_group_data *input)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       ext4_fsblk_t start = ext4_group_first_block_no(sb, input->group);
+       int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
+               le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
+       unsigned long gdblocks = ext4_bg_num_gdb(sb, input->group);
+       struct buffer_head *bh;
+       handle_t *handle;
+       ext4_fsblk_t block;
+       ext4_grpblk_t bit;
+       int i;
+       int err = 0, err2;
+
+       handle = ext4_journal_start_sb(sb, reserved_gdb + gdblocks +
+                                      2 + sbi->s_itb_per_group);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       lock_super(sb);
+       if (input->group != sbi->s_groups_count) {
+               err = -EBUSY;
+               goto exit_journal;
+       }
+
+       if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
+               err = PTR_ERR(bh);
+               goto exit_journal;
+       }
+
+       if (ext4_bg_has_super(sb, input->group)) {
+               ext4_debug("mark backup superblock %#04lx (+0)\n", start);
+               ext4_set_bit(0, bh->b_data);
+       }
+
+       /* Copy all of the GDT blocks into the backup in this group */
+       for (i = 0, bit = 1, block = start + 1;
+            i < gdblocks; i++, block++, bit++) {
+               struct buffer_head *gdb;
+
+               ext4_debug("update backup group %#04lx (+%d)\n", block, bit);
+
+               gdb = sb_getblk(sb, block);
+               if (!gdb) {
+                       err = -EIO;
+                       goto exit_bh;
+               }
+               if ((err = ext4_journal_get_write_access(handle, gdb))) {
+                       brelse(gdb);
+                       goto exit_bh;
+               }
+               lock_buffer(gdb);
+               memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
+               set_buffer_uptodate(gdb);
+               unlock_buffer(gdb);
+               ext4_journal_dirty_metadata(handle, gdb);
+               ext4_set_bit(bit, bh->b_data);
+               brelse(gdb);
+       }
+
+       /* Zero out all of the reserved backup group descriptor table blocks */
+       for (i = 0, bit = gdblocks + 1, block = start + bit;
+            i < reserved_gdb; i++, block++, bit++) {
+               struct buffer_head *gdb;
+
+               ext4_debug("clear reserved block %#04lx (+%d)\n", block, bit);
+
+               if (IS_ERR(gdb = bclean(handle, sb, block))) {
+                       err = PTR_ERR(gdb);
+                       goto exit_bh;
+               }
+               ext4_journal_dirty_metadata(handle, gdb);
+               ext4_set_bit(bit, bh->b_data);
+               brelse(gdb);
+       }
+       ext4_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
+                  input->block_bitmap - start);
+       ext4_set_bit(input->block_bitmap - start, bh->b_data);
+       ext4_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
+                  input->inode_bitmap - start);
+       ext4_set_bit(input->inode_bitmap - start, bh->b_data);
+
+       /* Zero out all of the inode table blocks */
+       for (i = 0, block = input->inode_table, bit = block - start;
+            i < sbi->s_itb_per_group; i++, bit++, block++) {
+               struct buffer_head *it;
+
+               ext4_debug("clear inode block %#04lx (+%d)\n", block, bit);
+               if (IS_ERR(it = bclean(handle, sb, block))) {
+                       err = PTR_ERR(it);
+                       goto exit_bh;
+               }
+               ext4_journal_dirty_metadata(handle, it);
+               brelse(it);
+               ext4_set_bit(bit, bh->b_data);
+       }
+       mark_bitmap_end(input->blocks_count, EXT4_BLOCKS_PER_GROUP(sb),
+                       bh->b_data);
+       ext4_journal_dirty_metadata(handle, bh);
+       brelse(bh);
+
+       /* Mark unused entries in inode bitmap used */
+       ext4_debug("clear inode bitmap %#04x (+%ld)\n",
+                  input->inode_bitmap, input->inode_bitmap - start);
+       if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
+               err = PTR_ERR(bh);
+               goto exit_journal;
+       }
+
+       mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
+                       bh->b_data);
+       ext4_journal_dirty_metadata(handle, bh);
+exit_bh:
+       brelse(bh);
+
+exit_journal:
+       unlock_super(sb);
+       if ((err2 = ext4_journal_stop(handle)) && !err)
+               err = err2;
+
+       return err;
+}
+
+
+/*
+ * Iterate through the groups which hold BACKUP superblock/GDT copies in an
+ * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
+ * calling this for the first time.  In a sparse filesystem it will be the
+ * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
+ * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
+ */
+static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
+                                 unsigned *five, unsigned *seven)
+{
+       unsigned *min = three;
+       int mult = 3;
+       unsigned ret;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
+               ret = *min;
+               *min += 1;
+               return ret;
+       }
+
+       if (*five < *min) {
+               min = five;
+               mult = 5;
+       }
+       if (*seven < *min) {
+               min = seven;
+               mult = 7;
+       }
+
+       ret = *min;
+       *min *= mult;
+
+       return ret;
+}
+
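+/*
+ * Typical iteration over every backup group (a sketch; verify_reserved_gdb
+ * below uses exactly this pattern):
+ *
+ *     unsigned three = 1, five = 5, seven = 7;
+ *     unsigned grp;
+ *
+ *     while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end)
+ *             ...examine group grp...
+ */
+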
+/*
+ * Check that all of the backup GDT blocks are held in the primary GDT block.
+ * It is assumed that they are stored in group order.  Returns the number
+ * of groups in the current filesystem that have backups, or a negative
+ * error code.
+ */
+static int verify_reserved_gdb(struct super_block *sb,
+                              struct buffer_head *primary)
+{
+       const ext4_fsblk_t blk = primary->b_blocknr;
+       const unsigned long end = EXT4_SB(sb)->s_groups_count;
+       unsigned three = 1;
+       unsigned five = 5;
+       unsigned seven = 7;
+       unsigned grp;
+       __le32 *p = (__le32 *)primary->b_data;
+       int gdbackups = 0;
+
+       while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
+               if (le32_to_cpu(*p++) !=
+                   grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
+                       ext4_warning(sb, __FUNCTION__,
+                                    "reserved GDT %llu"
+                                    " missing grp %d (%llu)",
+                                    blk, grp,
+                                    grp *
+                                    (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
+                                    blk);
+                       return -EINVAL;
+               }
+               if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
+                       return -EFBIG;
+       }
+
+       return gdbackups;
+}
+
+/*
+ * Called when we need to bring a reserved group descriptor table block into
+ * use from the resize inode.  The primary copy of the new GDT block currently
+ * is an indirect block (under the double indirect block in the resize inode).
+ * The new backup GDT blocks will be stored as leaf blocks in this indirect
+ * block, in group order.  Even though we know all the block numbers we need,
+ * we check to ensure that the resize inode has actually reserved these blocks.
+ *
+ * Don't need to update the block bitmaps because the blocks are still in use.
+ *
+ * We get all of the error cases out of the way, so that we are sure to not
+ * fail once we start modifying the data on disk, because JBD has no rollback.
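+ *
+ * For example (hypothetical geometry, 4KiB blocks with 32-byte group
+ * descriptors, so EXT4_DESC_PER_BLOCK(sb) == 128): adding group 256
+ * gives gdb_num == 2, and the new primary GDT block is the block at
+ * superblock + 1 + 2.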
+ */
+static int add_new_gdb(handle_t *handle, struct inode *inode,
+                      struct ext4_new_group_data *input,
+                      struct buffer_head **primary)
+{
+       struct super_block *sb = inode->i_sb;
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       unsigned long gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+       ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
+       struct buffer_head **o_group_desc, **n_group_desc;
+       struct buffer_head *dind;
+       int gdbackups;
+       struct ext4_iloc iloc;
+       __le32 *data;
+       int err;
+
+       if (test_opt(sb, DEBUG))
+               printk(KERN_DEBUG
+                      "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
+                      gdb_num);
+
+       /*
+        * If we are not using the primary superblock/GDT copy don't resize,
+        * because the user tools have no way of handling this.  Probably a
+        * bad time to do it anyway.
+        */
+       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+           le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+               ext4_warning(sb, __FUNCTION__,
+                       "won't resize using backup superblock at %llu",
+                       (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+               return -EPERM;
+       }
+
+       *primary = sb_bread(sb, gdblock);
+       if (!*primary)
+               return -EIO;
+
+       if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
+               err = gdbackups;
+               goto exit_bh;
+       }
+
+       data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
+       dind = sb_bread(sb, le32_to_cpu(*data));
+       if (!dind) {
+               err = -EIO;
+               goto exit_bh;
+       }
+
+       data = (__le32 *)dind->b_data;
+       if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
+               ext4_warning(sb, __FUNCTION__,
+                            "new group %u GDT block %llu not reserved",
+                            input->group, gdblock);
+               err = -EINVAL;
+               goto exit_dind;
+       }
+
+       if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
+               goto exit_dind;
+
+       if ((err = ext4_journal_get_write_access(handle, *primary)))
+               goto exit_sbh;
+
+       if ((err = ext4_journal_get_write_access(handle, dind)))
+               goto exit_primary;
+
+       /* ext4_reserve_inode_write() gets a reference on the iloc */
+       if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
+               goto exit_dindj;
+
+       n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
+                       GFP_KERNEL);
+       if (!n_group_desc) {
+               err = -ENOMEM;
+               ext4_warning (sb, __FUNCTION__,
+                             "not enough memory for %lu groups", gdb_num + 1);
+               goto exit_inode;
+       }
+
+       /*
+        * Finally, we have all of the possible failures behind us...
+        *
+        * Remove new GDT block from inode double-indirect block and clear out
+        * the new GDT block for use (which also "frees" the backup GDT blocks
+        * from the reserved inode).  We don't need to change the bitmaps for
+        * these blocks, because they are marked as in-use from being in the
+        * reserved inode, and will become GDT blocks (primary and backup).
+        */
+       data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
+       ext4_journal_dirty_metadata(handle, dind);
+       brelse(dind);
+       inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
+       ext4_mark_iloc_dirty(handle, inode, &iloc);
+       memset((*primary)->b_data, 0, sb->s_blocksize);
+       ext4_journal_dirty_metadata(handle, *primary);
+
+       o_group_desc = EXT4_SB(sb)->s_group_desc;
+       memcpy(n_group_desc, o_group_desc,
+              EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+       n_group_desc[gdb_num] = *primary;
+       EXT4_SB(sb)->s_group_desc = n_group_desc;
+       EXT4_SB(sb)->s_gdb_count++;
+       kfree(o_group_desc);
+
+       es->s_reserved_gdt_blocks =
+               cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
+       ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+
+       return 0;
+
+exit_inode:
+       //ext4_journal_release_buffer(handle, iloc.bh);
+       brelse(iloc.bh);
+exit_dindj:
+       //ext4_journal_release_buffer(handle, dind);
+exit_primary:
+       //ext4_journal_release_buffer(handle, *primary);
+exit_sbh:
+       //ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh);
+exit_dind:
+       brelse(dind);
+exit_bh:
+       brelse(*primary);
+
+       ext4_debug("leaving with error %d\n", err);
+       return err;
+}
+
+/*
+ * Called when we are adding a new group which has a backup copy of each of
+ * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
+ * We need to add these reserved backup GDT blocks to the resize inode, so
+ * that they are kept for future resizing and not allocated to files.
+ *
+ * Each reserved backup GDT block will go into a different indirect block.
+ * The indirect blocks are actually the primary reserved GDT blocks,
+ * so we know in advance what their block numbers are.  We only get the
+ * double-indirect block to verify it is pointing to the primary reserved
+ * GDT blocks so we don't overwrite a data block by accident.  The reserved
+ * backup GDT blocks are stored in their reserved primary GDT block.
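+ *
+ * For example (hypothetical geometry): with 32768 blocks per group and
+ * the first reserved primary GDT block at block 300, the backup for new
+ * group 50 is recorded in that primary block at index "gdbackups" with
+ * the value 50*32768 + 300 == 1638700.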
+ */
+static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
+                             struct ext4_new_group_data *input)
+{
+       struct super_block *sb = inode->i_sb;
+       int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
+       struct buffer_head **primary;
+       struct buffer_head *dind;
+       struct ext4_iloc iloc;
+       ext4_fsblk_t blk;
+       __le32 *data, *end;
+       int gdbackups = 0;
+       int res, i;
+       int err;
+
+       primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
+       if (!primary)
+               return -ENOMEM;
+
+       data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
+       dind = sb_bread(sb, le32_to_cpu(*data));
+       if (!dind) {
+               err = -EIO;
+               goto exit_free;
+       }
+
+       blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
+       data = (__le32 *)dind->b_data + EXT4_SB(sb)->s_gdb_count;
+       end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
+
+       /* Get each reserved primary GDT block and verify it holds backups */
+       for (res = 0; res < reserved_gdb; res++, blk++) {
+               if (le32_to_cpu(*data) != blk) {
+                       ext4_warning(sb, __FUNCTION__,
+                                    "reserved block %llu"
+                                    " not at offset %ld",
+                                    blk,
+                                    (long)(data - (__le32 *)dind->b_data));
+                       err = -EINVAL;
+                       goto exit_bh;
+               }
+               primary[res] = sb_bread(sb, blk);
+               if (!primary[res]) {
+                       err = -EIO;
+                       goto exit_bh;
+               }
+               if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
+                       brelse(primary[res]);
+                       err = gdbackups;
+                       goto exit_bh;
+               }
+               if (++data >= end)
+                       data = (__le32 *)dind->b_data;
+       }
+
+       for (i = 0; i < reserved_gdb; i++) {
+               if ((err = ext4_journal_get_write_access(handle, primary[i]))) {
+                       /*
+                       int j;
+                       for (j = 0; j < i; j++)
+                               ext4_journal_release_buffer(handle, primary[j]);
+                        */
+                       goto exit_bh;
+               }
+       }
+
+       if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
+               goto exit_bh;
+
+       /*
+        * Finally we can add each of the reserved backup GDT blocks from
+        * the new group to its reserved primary GDT block.
+        */
+       blk = input->group * EXT4_BLOCKS_PER_GROUP(sb);
+       for (i = 0; i < reserved_gdb; i++) {
+               int err2;
+               data = (__le32 *)primary[i]->b_data;
+               /* printk("reserving backup %lu[%u] = %lu\n",
+                      primary[i]->b_blocknr, gdbackups,
+                      blk + primary[i]->b_blocknr); */
+               data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
+               err2 = ext4_journal_dirty_metadata(handle, primary[i]);
+               if (!err)
+                       err = err2;
+       }
+       inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
+       ext4_mark_iloc_dirty(handle, inode, &iloc);
+
+exit_bh:
+       while (--res >= 0)
+               brelse(primary[res]);
+       brelse(dind);
+
+exit_free:
+       kfree(primary);
+
+       return err;
+}
+
+/*
+ * Update the backup copies of the ext4 metadata.  These don't need to be part
+ * of the main resize transaction, because e2fsck will re-write them if there
+ * is a problem (basically only OOM will cause a problem).  However, we
+ * _should_ update the backups if possible, in case the primary gets trashed
+ * for some reason and we need to run e2fsck from a backup superblock.  The
+ * important part is that the new block and inode counts are in the backup
+ * superblocks, and the location of the new group metadata in the GDT backups.
+ *
+ * We do not need lock_super() for this, because these blocks are not
+ * otherwise touched by the filesystem code when it is mounted.  We don't
+ * need to worry about last changing from sbi->s_groups_count, because the
+ * worst that can happen is that we do not copy the full number of backups
+ * at this time.  The resize which changed s_groups_count will backup again.
+ */
+static void update_backups(struct super_block *sb,
+                          int blk_off, char *data, int size)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       const unsigned long last = sbi->s_groups_count;
+       const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
+       unsigned three = 1;
+       unsigned five = 5;
+       unsigned seven = 7;
+       unsigned group;
+       int rest = sb->s_blocksize - size;
+       handle_t *handle;
+       int err = 0, err2;
+
+       handle = ext4_journal_start_sb(sb, EXT4_MAX_TRANS_DATA);
+       if (IS_ERR(handle)) {
+               group = 1;
+               err = PTR_ERR(handle);
+               goto exit_err;
+       }
+
+       while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
+               struct buffer_head *bh;
+
+               /* Out of journal space, and can't get more - abort - so sad */
+               if (handle->h_buffer_credits == 0 &&
+                   ext4_journal_extend(handle, EXT4_MAX_TRANS_DATA) &&
+                   (err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
+                       break;
+
+               bh = sb_getblk(sb, group * bpg + blk_off);
+               if (!bh) {
+                       err = -EIO;
+                       break;
+               }
+               ext4_debug("update metadata backup %#04lx\n",
+                         (unsigned long)bh->b_blocknr);
+               if ((err = ext4_journal_get_write_access(handle, bh)))
+                       break;
+               lock_buffer(bh);
+               memcpy(bh->b_data, data, size);
+               if (rest)
+                       memset(bh->b_data + size, 0, rest);
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+               ext4_journal_dirty_metadata(handle, bh);
+               brelse(bh);
+       }
+       if ((err2 = ext4_journal_stop(handle)) && !err)
+               err = err2;
+
+       /*
+        * Ugh! Need to have e2fsck write the backup copies.  It is too
+        * late to revert the resize, we shouldn't fail just because of
+        * the backup copies (they are only needed in case of corruption).
+        *
+        * However, if we got here we have a journal problem too, so we
+        * can't really start a transaction to mark the superblock.
+        * Chicken out and just set the flag in the hope that it will be
+        * written to disk, and if not - we will simply wait until the next
+        * fsck.
+        */
+exit_err:
+       if (err) {
+               ext4_warning(sb, __FUNCTION__,
+                            "can't update backup for group %d (err %d), "
+                            "forcing fsck on next reboot", group, err);
+               sbi->s_mount_state &= ~EXT4_VALID_FS;
+               sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
+               mark_buffer_dirty(sbi->s_sbh);
+       }
+}
+
+/* Add group descriptor data to an existing or new group descriptor block.
+ * Ensure we handle all possible error conditions _before_ we start modifying
+ * the filesystem, because we cannot abort the transaction and not have it
+ * write the data to disk.
+ *
+ * If we are on a GDT block boundary, we need to get the reserved GDT block.
+ * Otherwise, we may need to add backup GDT blocks for a sparse group.
+ *
+ * We only need to hold the superblock lock while we are actually adding
+ * in the new group's counts to the superblock.  Prior to that we have
+ * not really "added" the group at all.  We re-check that we are still
+ * adding in the last group in case things have changed since verifying.
+ */
+int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
+               le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
+       struct buffer_head *primary = NULL;
+       struct ext4_group_desc *gdp;
+       struct inode *inode = NULL;
+       handle_t *handle;
+       int gdb_off, gdb_num;
+       int err, err2;
+
+       gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb);
+       gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);
+
+       if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
+               ext4_warning(sb, __FUNCTION__,
+                            "Can't resize non-sparse filesystem further");
+               return -EPERM;
+       }
+
+       if (ext4_blocks_count(es) + input->blocks_count <
+           ext4_blocks_count(es)) {
+               ext4_warning(sb, __FUNCTION__, "blocks_count overflow");
+               return -EINVAL;
+       }
+
+       if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
+           le32_to_cpu(es->s_inodes_count)) {
+               ext4_warning(sb, __FUNCTION__, "inodes_count overflow");
+               return -EINVAL;
+       }
+
+       if (reserved_gdb || gdb_off == 0) {
+               if (!EXT4_HAS_COMPAT_FEATURE(sb,
+                                            EXT4_FEATURE_COMPAT_RESIZE_INODE)){
+                       ext4_warning(sb, __FUNCTION__,
+                                    "No reserved GDT blocks, can't resize");
+                       return -EPERM;
+               }
+               inode = iget(sb, EXT4_RESIZE_INO);
+               if (!inode || is_bad_inode(inode)) {
+                       ext4_warning(sb, __FUNCTION__,
+                                    "Error opening resize inode");
+                       iput(inode);
+                       return -ENOENT;
+               }
+       }
+
+       if ((err = verify_group_input(sb, input)))
+               goto exit_put;
+
+       if ((err = setup_new_group_blocks(sb, input)))
+               goto exit_put;
+
+       /*
+        * We will always be modifying at least the superblock and a GDT
+        * block.  If we are adding a group past the last current GDT block,
+        * we will also modify the inode and the dindirect block.  If we
+        * are adding a group with superblock/GDT backups, we will also
+        * modify each of the reserved GDT dindirect blocks.
+        */
+       handle = ext4_journal_start_sb(sb,
+                                      ext4_bg_has_super(sb, input->group) ?
+                                      3 + reserved_gdb : 4);
+       if (IS_ERR(handle)) {
+               err = PTR_ERR(handle);
+               goto exit_put;
+       }
+
+       lock_super(sb);
+       if (input->group != sbi->s_groups_count) {
+               ext4_warning(sb, __FUNCTION__,
+                            "multiple resizers run on filesystem!");
+               err = -EBUSY;
+               goto exit_journal;
+       }
+
+       if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
+               goto exit_journal;
+
+       /*
+        * We will only either add reserved group blocks to a backup group
+        * or remove reserved blocks for the first group in a new group block.
+        * Doing both would mean more complex code, and sane people don't
+        * use non-sparse filesystems anymore.  This is already checked above.
+        */
+       if (gdb_off) {
+               primary = sbi->s_group_desc[gdb_num];
+               if ((err = ext4_journal_get_write_access(handle, primary)))
+                       goto exit_journal;
+
+               if (reserved_gdb && ext4_bg_num_gdb(sb, input->group) &&
+                   (err = reserve_backup_gdb(handle, inode, input)))
+                       goto exit_journal;
+       } else if ((err = add_new_gdb(handle, inode, input, &primary)))
+               goto exit_journal;
+
+       /*
+        * OK, now we've set up the new group.  Time to make it active.
+        *
+        * Current kernels don't lock all allocations via lock_super(),
+        * so we have to be safe wrt. concurrent accesses to the group
+        * data.  So we need to be careful to set all of the relevant
+        * group descriptor data etc. *before* we enable the group.
+        *
+        * The key field here is sbi->s_groups_count: as long as
+        * that retains its old value, nobody is going to access the new
+        * group.
+        *
+        * So first we update all the descriptor metadata for the new
+        * group; then we update the total disk blocks count; then we
+        * update the groups count to enable the group; then finally we
+        * update the free space counts so that the system can start
+        * using the new disk blocks.
+        */
+
+       /* Update group descriptor block for new group */
+       gdp = (struct ext4_group_desc *)primary->b_data + gdb_off;
+
+       ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
+       ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
+       ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
+       gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
+       gdp->bg_free_inodes_count = cpu_to_le16(EXT4_INODES_PER_GROUP(sb));
+
+       /*
+        * Make the new blocks and inodes valid next.  We do this before
+        * increasing the group count so that once the group is enabled,
+        * all of its blocks and inodes are already valid.
+        *
+        * We always allocate group-by-group, then block-by-block or
+        * inode-by-inode within a group, so enabling these
+        * blocks/inodes before the group is live won't actually let us
+        * allocate the new space yet.
+        */
+       ext4_blocks_count_set(es, ext4_blocks_count(es) +
+               input->blocks_count);
+       es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
+               EXT4_INODES_PER_GROUP(sb));
+
+       /*
+        * We need to protect s_groups_count against other CPUs seeing
+        * inconsistent state in the superblock.
+        *
+        * The precise rules we use are:
+        *
+        * * Writers of s_groups_count *must* hold lock_super
+        * AND
+        * * Writers must perform a smp_wmb() after updating all dependent
+        *   data and before modifying the groups count
+        *
+        * * Readers must hold lock_super() over the access
+        * OR
+        * * Readers must perform an smp_rmb() after reading the groups count
+        *   and before reading any dependent data.
+        *
+        * NB. These rules can be relaxed when checking the group count
+        * while freeing data, as we can only allocate from a block
+        * group after serialising against the group count, and we can
+        * only then free after serialising in turn against that
+        * allocation.
+        */
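+       /*
+        * An illustrative lockless reader obeying the rules above (a
+        * sketch, not code from this patch):
+        *
+        *      ngroups = EXT4_SB(sb)->s_groups_count;
+        *      smp_rmb();
+        *      gdp = ext4_get_group_desc(sb, ngroups - 1, NULL);
+        *
+        * Its smp_rmb() pairs with the smp_wmb() below.
+        */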
+       smp_wmb();
+
+       /* Update the global fs size fields */
+       sbi->s_groups_count++;
+
+       ext4_journal_dirty_metadata(handle, primary);
+
+       /* Update the reserved block counts only once the new group is
+        * active. */
+       ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
+               input->reserved_blocks);
+
+       /* Update the free space counts */
+       percpu_counter_mod(&sbi->s_freeblocks_counter,
+                          input->free_blocks_count);
+       percpu_counter_mod(&sbi->s_freeinodes_counter,
+                          EXT4_INODES_PER_GROUP(sb));
+
+       ext4_journal_dirty_metadata(handle, sbi->s_sbh);
+       sb->s_dirt = 1;
+
+exit_journal:
+       unlock_super(sb);
+       if ((err2 = ext4_journal_stop(handle)) && !err)
+               err = err2;
+       if (!err) {
+               update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
+                              sizeof(struct ext4_super_block));
+               update_backups(sb, primary->b_blocknr, primary->b_data,
+                              primary->b_size);
+       }
+exit_put:
+       iput(inode);
+       return err;
+} /* ext4_group_add */
+
+/* Extend the filesystem to the new number of blocks specified.  This entry
+ * point is only used to extend the current filesystem to the end of the last
+ * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
+ * for emergencies (because it has no dependencies on reserved blocks).
+ *
+ * If we _really_ wanted, we could use default values to call ext4_group_add()
+ * allow the "remount" trick to work for arbitrary resizing, assuming enough
+ * GDT blocks are reserved to grow to the desired size.
+ */
+int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
+                     ext4_fsblk_t n_blocks_count)
+{
+       ext4_fsblk_t o_blocks_count;
+       unsigned long o_groups_count;
+       ext4_grpblk_t last;
+       ext4_grpblk_t add;
+       struct buffer_head * bh;
+       handle_t *handle;
+       int err;
+       unsigned long freed_blocks;
+
+       /* We don't need to worry about locking wrt other resizers just
+        * yet: we're going to revalidate es->s_blocks_count after
+        * taking lock_super() below. */
+       o_blocks_count = ext4_blocks_count(es);
+       o_groups_count = EXT4_SB(sb)->s_groups_count;
+
+       if (test_opt(sb, DEBUG))
+               printk(KERN_DEBUG "EXT4-fs: extending last group from %llu to %llu blocks\n",
+                      o_blocks_count, n_blocks_count);
+
+       if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
+               return 0;
+
+       if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
+               printk(KERN_ERR "EXT4-fs: filesystem on %s:"
+                       " too large to resize to %llu blocks safely\n",
+                       sb->s_id, n_blocks_count);
+               if (sizeof(sector_t) < 8)
+                       ext4_warning(sb, __FUNCTION__,
+                                    "CONFIG_LBD not enabled");
+               return -EINVAL;
+       }
+
+       if (n_blocks_count < o_blocks_count) {
+               ext4_warning(sb, __FUNCTION__,
+                            "can't shrink FS - resize aborted");
+               return -EBUSY;
+       }
+
+       /* Handle the remaining blocks in the last group only. */
+       ext4_get_group_no_and_offset(sb, o_blocks_count, NULL, &last);
+
+       if (last == 0) {
+               ext4_warning(sb, __FUNCTION__,
+                            "need to use ext2online to resize further");
+               return -EPERM;
+       }
+
+       add = EXT4_BLOCKS_PER_GROUP(sb) - last;
+
+       if (o_blocks_count + add < o_blocks_count) {
+               ext4_warning(sb, __FUNCTION__, "blocks_count overflow");
+               return -EINVAL;
+       }
+
+       if (o_blocks_count + add > n_blocks_count)
+               add = n_blocks_count - o_blocks_count;
+
+       if (o_blocks_count + add < n_blocks_count)
+               ext4_warning(sb, __FUNCTION__,
+                            "will only finish group (%llu"
+                            " blocks, %u new)",
+                            o_blocks_count + add, add);
+
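+       /*
+        * Worked example (hypothetical numbers): with 32768 blocks per
+        * group and the last group currently holding 24576 blocks
+        * (last == 24576), add starts at 8192.  A request for fewer new
+        * blocks is clamped to the request; a request for more leaves
+        * add == 8192 and triggers the "will only finish group" warning
+        * above.
+        */
+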
+       /* See if the device is actually as big as what was requested */
+       bh = sb_bread(sb, o_blocks_count + add - 1);
+       if (!bh) {
+               ext4_warning(sb, __FUNCTION__,
+                            "can't read last block, resize aborted");
+               return -ENOSPC;
+       }
+       brelse(bh);
+
+       /* We will update the superblock, one block bitmap, and
+        * one group descriptor via ext4_free_blocks().
+        */
+       handle = ext4_journal_start_sb(sb, 3);
+       if (IS_ERR(handle)) {
+               err = PTR_ERR(handle);
+               ext4_warning(sb, __FUNCTION__, "error %d on journal start", err);
+               goto exit_put;
+       }
+
+       lock_super(sb);
+       if (o_blocks_count != ext4_blocks_count(es)) {
+               ext4_warning(sb, __FUNCTION__,
+                            "multiple resizers run on filesystem!");
+               unlock_super(sb);
+               err = -EBUSY;
+               goto exit_put;
+       }
+
+       if ((err = ext4_journal_get_write_access(handle,
+                                                EXT4_SB(sb)->s_sbh))) {
+               ext4_warning(sb, __FUNCTION__,
+                            "error %d on journal write access", err);
+               unlock_super(sb);
+               ext4_journal_stop(handle);
+               goto exit_put;
+       }
+       ext4_blocks_count_set(es, o_blocks_count + add);
+       ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+       sb->s_dirt = 1;
+       unlock_super(sb);
+       ext4_debug("freeing blocks %lu through %llu\n", o_blocks_count,
+                  o_blocks_count + add);
+       ext4_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
+       ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
+                  o_blocks_count + add);
+       if ((err = ext4_journal_stop(handle)))
+               goto exit_put;
+       if (test_opt(sb, DEBUG))
+               printk(KERN_DEBUG "EXT4-fs: extended group to %llu blocks\n",
+                      ext4_blocks_count(es));
+       update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, (char *)es,
+                      sizeof(struct ext4_super_block));
+exit_put:
+       return err;
+} /* ext4_group_extend */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
new file mode 100644 (file)
index 0000000..b4b022a
--- /dev/null
@@ -0,0 +1,2829 @@
+/*
+ *  linux/fs/ext4/super.c
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/inode.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Big-endian to little-endian byte-swapping/bitmaps by
+ *        David S. Miller (davem@caip.rutgers.edu), 1995
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/parser.h>
+#include <linux/smp_lock.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/random.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/quotaops.h>
+#include <linux/seq_file.h>
+
+#include <asm/uaccess.h>
+
+#include "xattr.h"
+#include "acl.h"
+#include "namei.h"
+
+static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
+                            unsigned long journal_devnum);
+static int ext4_create_journal(struct super_block *, struct ext4_super_block *,
+                              unsigned int);
+static void ext4_commit_super (struct super_block * sb,
+                              struct ext4_super_block * es,
+                              int sync);
+static void ext4_mark_recovery_complete(struct super_block * sb,
+                                       struct ext4_super_block * es);
+static void ext4_clear_journal_err(struct super_block * sb,
+                                  struct ext4_super_block * es);
+static int ext4_sync_fs(struct super_block *sb, int wait);
+static const char *ext4_decode_error(struct super_block * sb, int errno,
+                                    char nbuf[16]);
+static int ext4_remount (struct super_block * sb, int * flags, char * data);
+static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf);
+static void ext4_unlockfs(struct super_block *sb);
+static void ext4_write_super (struct super_block * sb);
+static void ext4_write_super_lockfs(struct super_block *sb);
+
+
+ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
+                              struct ext4_group_desc *bg)
+{
+       return le32_to_cpu(bg->bg_block_bitmap) |
+               (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+                (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
+}
+
+ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
+                              struct ext4_group_desc *bg)
+{
+       return le32_to_cpu(bg->bg_inode_bitmap) |
+               (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+                (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
+}
+
+ext4_fsblk_t ext4_inode_table(struct super_block *sb,
+                             struct ext4_group_desc *bg)
+{
+       return le32_to_cpu(bg->bg_inode_table) |
+               (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
+                (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
+}
+
+void ext4_block_bitmap_set(struct super_block *sb,
+                          struct ext4_group_desc *bg, ext4_fsblk_t blk)
+{
+       bg->bg_block_bitmap = cpu_to_le32((u32)blk);
+       if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+               bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
+}
+
+void ext4_inode_bitmap_set(struct super_block *sb,
+                          struct ext4_group_desc *bg, ext4_fsblk_t blk)
+{
+       bg->bg_inode_bitmap  = cpu_to_le32((u32)blk);
+       if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+               bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
+}
+
+void ext4_inode_table_set(struct super_block *sb,
+                         struct ext4_group_desc *bg, ext4_fsblk_t blk)
+{
+       bg->bg_inode_table = cpu_to_le32((u32)blk);
+       if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
+               bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
+}
+
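+/*
+ * A round-trip example of the hi/lo split above (illustrative): for
+ * blk == 0x123456789, ext4_block_bitmap_set() stores
+ * bg_block_bitmap == 0x23456789 and bg_block_bitmap_hi == 0x1, and
+ * ext4_block_bitmap() reassembles (0x1ULL << 32) | 0x23456789, provided
+ * EXT4_DESC_SIZE(sb) is at least EXT4_MIN_DESC_SIZE_64BIT.
+ */
+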
+/*
+ * Wrappers for jbd2_journal_start/end.
+ *
+ * The only special thing we need to do here is to make sure that all
+ * journal_end calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate.
+ */
+handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
+{
+       journal_t *journal;
+
+       if (sb->s_flags & MS_RDONLY)
+               return ERR_PTR(-EROFS);
+
+       /* Special case here: if the journal has aborted behind our
+        * backs (eg. EIO in the commit thread), then we still need to
+        * take the FS itself readonly cleanly. */
+       journal = EXT4_SB(sb)->s_journal;
+       if (is_journal_aborted(journal)) {
+               ext4_abort(sb, __FUNCTION__,
+                          "Detected aborted journal");
+               return ERR_PTR(-EROFS);
+       }
+
+       return jbd2_journal_start(journal, nblocks);
+}
+
+/*
+ * The only special thing we need to do here is to make sure that all
+ * jbd2_journal_stop calls result in the superblock being marked dirty, so
+ * that sync() will call the filesystem's write_super callback if
+ * appropriate.
+ */
+int __ext4_journal_stop(const char *where, handle_t *handle)
+{
+       struct super_block *sb;
+       int err;
+       int rc;
+
+       sb = handle->h_transaction->t_journal->j_private;
+       err = handle->h_err;
+       rc = jbd2_journal_stop(handle);
+
+       if (!err)
+               err = rc;
+       if (err)
+               __ext4_std_error(sb, where, err);
+       return err;
+}
+
+void ext4_journal_abort_handle(const char *caller, const char *err_fn,
+               struct buffer_head *bh, handle_t *handle, int err)
+{
+       char nbuf[16];
+       const char *errstr = ext4_decode_error(NULL, err, nbuf);
+
+       if (bh)
+               BUFFER_TRACE(bh, "abort");
+
+       if (!handle->h_err)
+               handle->h_err = err;
+
+       if (is_handle_aborted(handle))
+               return;
+
+       printk(KERN_ERR "%s: aborting transaction: %s in %s\n",
+              caller, errstr, err_fn);
+
+       jbd2_journal_abort_handle(handle);
+}
+
+/* Deal with the reporting of failure conditions on a filesystem such as
+ * inconsistencies detected or read IO failures.
+ *
+ * On ext2, we can store the error state of the filesystem in the
+ * superblock.  That is not possible on ext4, because we may have other
+ * write ordering constraints on the superblock which prevent us from
+ * writing it out straight away; and given that the journal is about to
+ * be aborted, we can't rely on the current, or future, transactions to
+ * write out the superblock safely.
+ *
+ * We'll just use the jbd2_journal_abort() error code to record an error in
+ * the journal instead.  On recovery, the journal will complain about
+ * that error until we've noted it down and cleared it.
+ */
+
+static void ext4_handle_error(struct super_block *sb)
+{
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+       es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+
+       if (sb->s_flags & MS_RDONLY)
+               return;
+
+       if (!test_opt (sb, ERRORS_CONT)) {
+               journal_t *journal = EXT4_SB(sb)->s_journal;
+
+               EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_ABORT;
+               if (journal)
+                       jbd2_journal_abort(journal, -EIO);
+       }
+       if (test_opt (sb, ERRORS_RO)) {
+               printk (KERN_CRIT "Remounting filesystem read-only\n");
+               sb->s_flags |= MS_RDONLY;
+       }
+       ext4_commit_super(sb, es, 1);
+       if (test_opt(sb, ERRORS_PANIC))
+               panic("EXT4-fs (device %s): panic forced after error\n",
+                       sb->s_id);
+}
+
+void ext4_error (struct super_block * sb, const char * function,
+                const char * fmt, ...)
+{
+       va_list args;
+
+       va_start(args, fmt);
+       printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
+       vprintk(fmt, args);
+       printk("\n");
+       va_end(args);
+
+       ext4_handle_error(sb);
+}
+
+static const char *ext4_decode_error(struct super_block * sb, int errno,
+                                    char nbuf[16])
+{
+       char *errstr = NULL;
+
+       switch (errno) {
+       case -EIO:
+               errstr = "IO failure";
+               break;
+       case -ENOMEM:
+               errstr = "Out of memory";
+               break;
+       case -EROFS:
+               if (!sb || EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)
+                       errstr = "Journal has aborted";
+               else
+                       errstr = "Readonly filesystem";
+               break;
+       default:
+               /* If the caller passed in an extra buffer for unknown
+                * errors, textualise them now.  Else we just return
+                * NULL. */
+               if (nbuf) {
+                       /* Check for truncated error codes... */
+                       if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
+                               errstr = nbuf;
+               }
+               break;
+       }
+
+       return errstr;
+}
+
+/* __ext4_std_error decodes expected errors from journaling functions
+ * automatically and invokes the appropriate error response.  */
+
+void __ext4_std_error (struct super_block * sb, const char * function,
+                      int errno)
+{
+       char nbuf[16];
+       const char *errstr;
+
+       /* Special case: if the error is EROFS, and we're not already
+        * inside a transaction, then there's really no point in logging
+        * an error. */
+       if (errno == -EROFS && journal_current_handle() == NULL &&
+           (sb->s_flags & MS_RDONLY))
+               return;
+
+       errstr = ext4_decode_error(sb, errno, nbuf);
+       printk (KERN_CRIT "EXT4-fs error (device %s) in %s: %s\n",
+               sb->s_id, function, errstr);
+
+       ext4_handle_error(sb);
+}
+
+/*
+ * ext4_abort is a much stronger failure handler than ext4_error.  The
+ * abort function may be used to deal with unrecoverable failures such
+ * as journal IO errors or ENOMEM at a critical moment in log management.
+ *
+ * We unconditionally force the filesystem into an ABORT|READONLY state,
+ * unless the error response on the fs has been set to panic in which
+ * case we take the easy way out and panic immediately.
+ */
+
+void ext4_abort (struct super_block * sb, const char * function,
+                const char * fmt, ...)
+{
+       va_list args;
+
+       printk (KERN_CRIT "ext4_abort called.\n");
+
+       va_start(args, fmt);
+       printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
+       vprintk(fmt, args);
+       printk("\n");
+       va_end(args);
+
+       if (test_opt(sb, ERRORS_PANIC))
+               panic("EXT4-fs panic from previous error\n");
+
+       if (sb->s_flags & MS_RDONLY)
+               return;
+
+       printk(KERN_CRIT "Remounting filesystem read-only\n");
+       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+       sb->s_flags |= MS_RDONLY;
+       EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_ABORT;
+       jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
+}
+
+void ext4_warning (struct super_block * sb, const char * function,
+                  const char * fmt, ...)
+{
+       va_list args;
+
+       va_start(args, fmt);
+       printk(KERN_WARNING "EXT4-fs warning (device %s): %s: ",
+              sb->s_id, function);
+       vprintk(fmt, args);
+       printk("\n");
+       va_end(args);
+}
+
+void ext4_update_dynamic_rev(struct super_block *sb)
+{
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+       if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
+               return;
+
+       ext4_warning(sb, __FUNCTION__,
+                    "updating to rev %d because of new feature flag, "
+                    "running e2fsck is recommended",
+                    EXT4_DYNAMIC_REV);
+
+       es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
+       es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
+       es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
+       /* leave es->s_feature_*compat flags alone */
+       /* es->s_uuid will be set by e2fsck if empty */
+
+       /*
+        * The rest of the superblock fields should be zero, and if not it
+        * means they are likely already in use, so leave them alone.  We
+        * can leave it up to e2fsck to clean up any inconsistencies there.
+        */
+}
+
+/*
+ * Open the external journal device
+ */
+static struct block_device *ext4_blkdev_get(dev_t dev)
+{
+       struct block_device *bdev;
+       char b[BDEVNAME_SIZE];
+
+       bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+       if (IS_ERR(bdev))
+               goto fail;
+       return bdev;
+
+fail:
+       printk(KERN_ERR "EXT4: failed to open journal device %s: %ld\n",
+                       __bdevname(dev, b), PTR_ERR(bdev));
+       return NULL;
+}
+
+/*
+ * Release the journal device
+ */
+static int ext4_blkdev_put(struct block_device *bdev)
+{
+       bd_release(bdev);
+       return blkdev_put(bdev);
+}
+
+static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
+{
+       struct block_device *bdev;
+       int ret = -ENODEV;
+
+       bdev = sbi->journal_bdev;
+       if (bdev) {
+               ret = ext4_blkdev_put(bdev);
+               sbi->journal_bdev = NULL;
+       }
+       return ret;
+}
+
+static inline struct inode *orphan_list_entry(struct list_head *l)
+{
+       return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
+}
+
+static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
+{
+       struct list_head *l;
+
+       printk(KERN_ERR "sb orphan head is %d\n",
+              le32_to_cpu(sbi->s_es->s_last_orphan));
+
+       printk(KERN_ERR "sb_info orphan list:\n");
+       list_for_each(l, &sbi->s_orphan) {
+               struct inode *inode = orphan_list_entry(l);
+               printk(KERN_ERR "  "
+                      "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
+                      inode->i_sb->s_id, inode->i_ino, inode,
+                      inode->i_mode, inode->i_nlink,
+                      NEXT_ORPHAN(inode));
+       }
+}
+
+static void ext4_put_super (struct super_block * sb)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       int i;
+
+       ext4_ext_release(sb);
+       ext4_xattr_put_super(sb);
+       jbd2_journal_destroy(sbi->s_journal);
+       if (!(sb->s_flags & MS_RDONLY)) {
+               EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+               es->s_state = cpu_to_le16(sbi->s_mount_state);
+               BUFFER_TRACE(sbi->s_sbh, "marking dirty");
+               mark_buffer_dirty(sbi->s_sbh);
+               ext4_commit_super(sb, es, 1);
+       }
+
+       for (i = 0; i < sbi->s_gdb_count; i++)
+               brelse(sbi->s_group_desc[i]);
+       kfree(sbi->s_group_desc);
+       percpu_counter_destroy(&sbi->s_freeblocks_counter);
+       percpu_counter_destroy(&sbi->s_freeinodes_counter);
+       percpu_counter_destroy(&sbi->s_dirs_counter);
+       brelse(sbi->s_sbh);
+#ifdef CONFIG_QUOTA
+       for (i = 0; i < MAXQUOTAS; i++)
+               kfree(sbi->s_qf_names[i]);
+#endif
+
+       /* Debugging code just in case the in-memory inode orphan list
+        * isn't empty.  The on-disk one can be non-empty if we've
+        * detected an error and taken the fs readonly, but the
+        * in-memory list had better be clean by this point. */
+       if (!list_empty(&sbi->s_orphan))
+               dump_orphan_list(sb, sbi);
+       J_ASSERT(list_empty(&sbi->s_orphan));
+
+       invalidate_bdev(sb->s_bdev, 0);
+       if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
+               /*
+                * Invalidate the journal device's buffers.  We don't want them
+                * floating about in memory - the physical journal device may
+                * be hotswapped, and it breaks the `ro-after' testing code.
+                */
+               sync_blockdev(sbi->journal_bdev);
+               invalidate_bdev(sbi->journal_bdev, 0);
+               ext4_blkdev_remove(sbi);
+       }
+       sb->s_fs_info = NULL;
+       kfree(sbi);
+       return;
+}
+
+static kmem_cache_t *ext4_inode_cachep;
+
+/*
+ * Called inside transaction, so use GFP_NOFS
+ */
+static struct inode *ext4_alloc_inode(struct super_block *sb)
+{
+       struct ext4_inode_info *ei;
+
+       ei = kmem_cache_alloc(ext4_inode_cachep, SLAB_NOFS);
+       if (!ei)
+               return NULL;
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+       ei->i_acl = EXT4_ACL_NOT_CACHED;
+       ei->i_default_acl = EXT4_ACL_NOT_CACHED;
+#endif
+       ei->i_block_alloc_info = NULL;
+       ei->vfs_inode.i_version = 1;
+       memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
+       return &ei->vfs_inode;
+}
+
+static void ext4_destroy_inode(struct inode *inode)
+{
+       kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
+}
+
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+       struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
+
+       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+           SLAB_CTOR_CONSTRUCTOR) {
+               INIT_LIST_HEAD(&ei->i_orphan);
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+               init_rwsem(&ei->xattr_sem);
+#endif
+               mutex_init(&ei->truncate_mutex);
+               inode_init_once(&ei->vfs_inode);
+       }
+}
+
+static int init_inodecache(void)
+{
+       ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
+                                            sizeof(struct ext4_inode_info),
+                                            0, (SLAB_RECLAIM_ACCOUNT|
+                                               SLAB_MEM_SPREAD),
+                                            init_once, NULL);
+       if (ext4_inode_cachep == NULL)
+               return -ENOMEM;
+       return 0;
+}
+
+static void destroy_inodecache(void)
+{
+       kmem_cache_destroy(ext4_inode_cachep);
+}
+
+static void ext4_clear_inode(struct inode *inode)
+{
+       struct ext4_block_alloc_info *rsv = EXT4_I(inode)->i_block_alloc_info;
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+       if (EXT4_I(inode)->i_acl &&
+                       EXT4_I(inode)->i_acl != EXT4_ACL_NOT_CACHED) {
+               posix_acl_release(EXT4_I(inode)->i_acl);
+               EXT4_I(inode)->i_acl = EXT4_ACL_NOT_CACHED;
+       }
+       if (EXT4_I(inode)->i_default_acl &&
+                       EXT4_I(inode)->i_default_acl != EXT4_ACL_NOT_CACHED) {
+               posix_acl_release(EXT4_I(inode)->i_default_acl);
+               EXT4_I(inode)->i_default_acl = EXT4_ACL_NOT_CACHED;
+       }
+#endif
+       ext4_discard_reservation(inode);
+       EXT4_I(inode)->i_block_alloc_info = NULL;
+       if (unlikely(rsv))
+               kfree(rsv);
+}
+
+static inline void ext4_show_quota_options(struct seq_file *seq, struct super_block *sb)
+{
+#if defined(CONFIG_QUOTA)
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (sbi->s_jquota_fmt)
+               seq_printf(seq, ",jqfmt=%s",
+               (sbi->s_jquota_fmt == QFMT_VFS_OLD) ? "vfsold": "vfsv0");
+
+       if (sbi->s_qf_names[USRQUOTA])
+               seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
+
+       if (sbi->s_qf_names[GRPQUOTA])
+               seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
+
+       if (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA)
+               seq_puts(seq, ",usrquota");
+
+       if (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)
+               seq_puts(seq, ",grpquota");
+#endif
+}
+
+static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
+{
+       struct super_block *sb = vfs->mnt_sb;
+
+       if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+               seq_puts(seq, ",data=journal");
+       else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+               seq_puts(seq, ",data=ordered");
+       else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
+               seq_puts(seq, ",data=writeback");
+
+       ext4_show_quota_options(seq, sb);
+
+       return 0;
+}
+
+
+static struct dentry *ext4_get_dentry(struct super_block *sb, void *vobjp)
+{
+       __u32 *objp = vobjp;
+       unsigned long ino = objp[0];
+       __u32 generation = objp[1];
+       struct inode *inode;
+       struct dentry *result;
+
+       if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
+               return ERR_PTR(-ESTALE);
+       if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
+               return ERR_PTR(-ESTALE);
+
+       /* iget isn't really right if the inode is currently unallocated!!
+        *
+        * ext4_read_inode will return a bad_inode if the inode had been
+        * deleted, so we should be safe.
+        *
+        * Currently we don't know the generation for the parent directory,
+        * so a generation of 0 means "accept any".
+        */
+       inode = iget(sb, ino);
+       if (inode == NULL)
+               return ERR_PTR(-ENOMEM);
+       if (is_bad_inode(inode) ||
+           (generation && inode->i_generation != generation)) {
+               iput(inode);
+               return ERR_PTR(-ESTALE);
+       }
+       /* now to find a dentry.
+        * If possible, get a well-connected one
+        */
+       result = d_alloc_anon(inode);
+       if (!result) {
+               iput(inode);
+               return ERR_PTR(-ENOMEM);
+       }
+       return result;
+}
+
+#ifdef CONFIG_QUOTA
+#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
+#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
+
+static int ext4_dquot_initialize(struct inode *inode, int type);
+static int ext4_dquot_drop(struct inode *inode);
+static int ext4_write_dquot(struct dquot *dquot);
+static int ext4_acquire_dquot(struct dquot *dquot);
+static int ext4_release_dquot(struct dquot *dquot);
+static int ext4_mark_dquot_dirty(struct dquot *dquot);
+static int ext4_write_info(struct super_block *sb, int type);
+static int ext4_quota_on(struct super_block *sb, int type, int format_id, char *path);
+static int ext4_quota_on_mount(struct super_block *sb, int type);
+static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
+                              size_t len, loff_t off);
+static ssize_t ext4_quota_write(struct super_block *sb, int type,
+                               const char *data, size_t len, loff_t off);
+
+static struct dquot_operations ext4_quota_operations = {
+       .initialize     = ext4_dquot_initialize,
+       .drop           = ext4_dquot_drop,
+       .alloc_space    = dquot_alloc_space,
+       .alloc_inode    = dquot_alloc_inode,
+       .free_space     = dquot_free_space,
+       .free_inode     = dquot_free_inode,
+       .transfer       = dquot_transfer,
+       .write_dquot    = ext4_write_dquot,
+       .acquire_dquot  = ext4_acquire_dquot,
+       .release_dquot  = ext4_release_dquot,
+       .mark_dirty     = ext4_mark_dquot_dirty,
+       .write_info     = ext4_write_info
+};
+
+static struct quotactl_ops ext4_qctl_operations = {
+       .quota_on       = ext4_quota_on,
+       .quota_off      = vfs_quota_off,
+       .quota_sync     = vfs_quota_sync,
+       .get_info       = vfs_get_dqinfo,
+       .set_info       = vfs_set_dqinfo,
+       .get_dqblk      = vfs_get_dqblk,
+       .set_dqblk      = vfs_set_dqblk
+};
+#endif
+
+static struct super_operations ext4_sops = {
+       .alloc_inode    = ext4_alloc_inode,
+       .destroy_inode  = ext4_destroy_inode,
+       .read_inode     = ext4_read_inode,
+       .write_inode    = ext4_write_inode,
+       .dirty_inode    = ext4_dirty_inode,
+       .delete_inode   = ext4_delete_inode,
+       .put_super      = ext4_put_super,
+       .write_super    = ext4_write_super,
+       .sync_fs        = ext4_sync_fs,
+       .write_super_lockfs = ext4_write_super_lockfs,
+       .unlockfs       = ext4_unlockfs,
+       .statfs         = ext4_statfs,
+       .remount_fs     = ext4_remount,
+       .clear_inode    = ext4_clear_inode,
+       .show_options   = ext4_show_options,
+#ifdef CONFIG_QUOTA
+       .quota_read     = ext4_quota_read,
+       .quota_write    = ext4_quota_write,
+#endif
+};
+
+static struct export_operations ext4_export_ops = {
+       .get_parent = ext4_get_parent,
+       .get_dentry = ext4_get_dentry,
+};
+
+enum {
+       Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
+       Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
+       Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
+       Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
+       Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
+       Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
+       Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
+       Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
+       Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
+       Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
+       Opt_grpquota, Opt_extents,
+};
+
+static match_table_t tokens = {
+       {Opt_bsd_df, "bsddf"},
+       {Opt_minix_df, "minixdf"},
+       {Opt_grpid, "grpid"},
+       {Opt_grpid, "bsdgroups"},
+       {Opt_nogrpid, "nogrpid"},
+       {Opt_nogrpid, "sysvgroups"},
+       {Opt_resgid, "resgid=%u"},
+       {Opt_resuid, "resuid=%u"},
+       {Opt_sb, "sb=%u"},
+       {Opt_err_cont, "errors=continue"},
+       {Opt_err_panic, "errors=panic"},
+       {Opt_err_ro, "errors=remount-ro"},
+       {Opt_nouid32, "nouid32"},
+       {Opt_nocheck, "nocheck"},
+       {Opt_nocheck, "check=none"},
+       {Opt_debug, "debug"},
+       {Opt_oldalloc, "oldalloc"},
+       {Opt_orlov, "orlov"},
+       {Opt_user_xattr, "user_xattr"},
+       {Opt_nouser_xattr, "nouser_xattr"},
+       {Opt_acl, "acl"},
+       {Opt_noacl, "noacl"},
+       {Opt_reservation, "reservation"},
+       {Opt_noreservation, "noreservation"},
+       {Opt_noload, "noload"},
+       {Opt_nobh, "nobh"},
+       {Opt_bh, "bh"},
+       {Opt_commit, "commit=%u"},
+       {Opt_journal_update, "journal=update"},
+       {Opt_journal_inum, "journal=%u"},
+       {Opt_journal_dev, "journal_dev=%u"},
+       {Opt_abort, "abort"},
+       {Opt_data_journal, "data=journal"},
+       {Opt_data_ordered, "data=ordered"},
+       {Opt_data_writeback, "data=writeback"},
+       {Opt_offusrjquota, "usrjquota="},
+       {Opt_usrjquota, "usrjquota=%s"},
+       {Opt_offgrpjquota, "grpjquota="},
+       {Opt_grpjquota, "grpjquota=%s"},
+       {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
+       {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
+       {Opt_grpquota, "grpquota"},
+       {Opt_noquota, "noquota"},
+       {Opt_quota, "quota"},
+       {Opt_usrquota, "usrquota"},
+       {Opt_barrier, "barrier=%u"},
+       {Opt_extents, "extents"},
+       {Opt_resize, "resize"},
+       {Opt_err, NULL},
+};
+
+static ext4_fsblk_t get_sb_block(void **data)
+{
+       ext4_fsblk_t    sb_block;
+       char            *options = (char *) *data;
+
+       if (!options || strncmp(options, "sb=", 3) != 0)
+               return 1;       /* Default location */
+       options += 3;
+       /*todo: use simple_strtoll with >32bit ext4 */
+       sb_block = simple_strtoul(options, &options, 0);
+       if (*options && *options != ',') {
+               printk("EXT4-fs: Invalid sb specification: %s\n",
+                      (char *) *data);
+               return 1;
+       }
+       if (*options == ',')
+               options++;
+       *data = (void *) options;
+       return sb_block;
+}
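+
+/*
+ * For example (illustrative): given mount data "sb=8193,grpid",
+ * get_sb_block() returns 8193 and advances *data past the comma to
+ * "grpid"; if the options do not start with "sb=", it returns 1, the
+ * default superblock location.
+ */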
+
+static int parse_options (char *options, struct super_block *sb,
+                         unsigned int *inum, unsigned long *journal_devnum,
+                         ext4_fsblk_t *n_blocks_count, int is_remount)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       char * p;
+       substring_t args[MAX_OPT_ARGS];
+       int data_opt = 0;
+       int option;
+#ifdef CONFIG_QUOTA
+       int qtype;
+       char *qname;
+#endif
+
+       if (!options)
+               return 1;
+
+       while ((p = strsep (&options, ",")) != NULL) {
+               int token;
+               if (!*p)
+                       continue;
+
+               token = match_token(p, tokens, args);
+               switch (token) {
+               case Opt_bsd_df:
+                       clear_opt (sbi->s_mount_opt, MINIX_DF);
+                       break;
+               case Opt_minix_df:
+                       set_opt (sbi->s_mount_opt, MINIX_DF);
+                       break;
+               case Opt_grpid:
+                       set_opt (sbi->s_mount_opt, GRPID);
+                       break;
+               case Opt_nogrpid:
+                       clear_opt (sbi->s_mount_opt, GRPID);
+                       break;
+               case Opt_resuid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       sbi->s_resuid = option;
+                       break;
+               case Opt_resgid:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       sbi->s_resgid = option;
+                       break;
+               case Opt_sb:
+                       /* handled by get_sb_block() instead of here */
+                       /* *sb_block = match_int(&args[0]); */
+                       break;
+               case Opt_err_panic:
+                       clear_opt (sbi->s_mount_opt, ERRORS_CONT);
+                       clear_opt (sbi->s_mount_opt, ERRORS_RO);
+                       set_opt (sbi->s_mount_opt, ERRORS_PANIC);
+                       break;
+               case Opt_err_ro:
+                       clear_opt (sbi->s_mount_opt, ERRORS_CONT);
+                       clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
+                       set_opt (sbi->s_mount_opt, ERRORS_RO);
+                       break;
+               case Opt_err_cont:
+                       clear_opt (sbi->s_mount_opt, ERRORS_RO);
+                       clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
+                       set_opt (sbi->s_mount_opt, ERRORS_CONT);
+                       break;
+               case Opt_nouid32:
+                       set_opt (sbi->s_mount_opt, NO_UID32);
+                       break;
+               case Opt_nocheck:
+                       clear_opt (sbi->s_mount_opt, CHECK);
+                       break;
+               case Opt_debug:
+                       set_opt (sbi->s_mount_opt, DEBUG);
+                       break;
+               case Opt_oldalloc:
+                       set_opt (sbi->s_mount_opt, OLDALLOC);
+                       break;
+               case Opt_orlov:
+                       clear_opt (sbi->s_mount_opt, OLDALLOC);
+                       break;
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+               case Opt_user_xattr:
+                       set_opt (sbi->s_mount_opt, XATTR_USER);
+                       break;
+               case Opt_nouser_xattr:
+                       clear_opt (sbi->s_mount_opt, XATTR_USER);
+                       break;
+#else
+               case Opt_user_xattr:
+               case Opt_nouser_xattr:
+                       printk(KERN_ERR "EXT4-fs: (no)user_xattr options "
+                               "not supported\n");
+                       break;
+#endif
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+               case Opt_acl:
+                       set_opt(sbi->s_mount_opt, POSIX_ACL);
+                       break;
+               case Opt_noacl:
+                       clear_opt(sbi->s_mount_opt, POSIX_ACL);
+                       break;
+#else
+               case Opt_acl:
+               case Opt_noacl:
+                       printk(KERN_ERR "EXT4-fs: (no)acl options "
+                               "not supported\n");
+                       break;
+#endif
+               case Opt_reservation:
+                       set_opt(sbi->s_mount_opt, RESERVATION);
+                       break;
+               case Opt_noreservation:
+                       clear_opt(sbi->s_mount_opt, RESERVATION);
+                       break;
+               case Opt_journal_update:
+                       /* @@@ FIXME */
+                       /* Eventually we will want to be able to create
+                          a journal file here.  For now, only allow the
+                          user to specify an existing inode to be the
+                          journal file. */
+                       if (is_remount) {
+                               printk(KERN_ERR "EXT4-fs: cannot specify "
+                                      "journal on remount\n");
+                               return 0;
+                       }
+                       set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
+                       break;
+               case Opt_journal_inum:
+                       if (is_remount) {
+                               printk(KERN_ERR "EXT4-fs: cannot specify "
+                                      "journal on remount\n");
+                               return 0;
+                       }
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       *inum = option;
+                       break;
+               case Opt_journal_dev:
+                       if (is_remount) {
+                               printk(KERN_ERR "EXT4-fs: cannot specify "
+                                      "journal on remount\n");
+                               return 0;
+                       }
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       *journal_devnum = option;
+                       break;
+               case Opt_noload:
+                       set_opt (sbi->s_mount_opt, NOLOAD);
+                       break;
+               case Opt_commit:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       if (option < 0)
+                               return 0;
+                       if (option == 0)
+                               option = JBD_DEFAULT_MAX_COMMIT_AGE;
+                       sbi->s_commit_interval = HZ * option;
+                       break;
+               case Opt_data_journal:
+                       data_opt = EXT4_MOUNT_JOURNAL_DATA;
+                       goto datacheck;
+               case Opt_data_ordered:
+                       data_opt = EXT4_MOUNT_ORDERED_DATA;
+                       goto datacheck;
+               case Opt_data_writeback:
+                       data_opt = EXT4_MOUNT_WRITEBACK_DATA;
+               datacheck:
+                       if (is_remount) {
+                               if ((sbi->s_mount_opt & EXT4_MOUNT_DATA_FLAGS)
+                                               != data_opt) {
+                                       printk(KERN_ERR
+                                               "EXT4-fs: cannot change data "
+                                               "mode on remount\n");
+                                       return 0;
+                               }
+                       } else {
+                               sbi->s_mount_opt &= ~EXT4_MOUNT_DATA_FLAGS;
+                               sbi->s_mount_opt |= data_opt;
+                       }
+                       break;
+#ifdef CONFIG_QUOTA
+               case Opt_usrjquota:
+                       qtype = USRQUOTA;
+                       goto set_qf_name;
+               case Opt_grpjquota:
+                       qtype = GRPQUOTA;
+set_qf_name:
+                       if (sb_any_quota_enabled(sb)) {
+                               printk(KERN_ERR
+                                       "EXT4-fs: Cannot change journalled "
+                                       "quota options when quota turned on.\n");
+                               return 0;
+                       }
+                       qname = match_strdup(&args[0]);
+                       if (!qname) {
+                               printk(KERN_ERR
+                                       "EXT4-fs: not enough memory for "
+                                       "storing quotafile name.\n");
+                               return 0;
+                       }
+                       if (sbi->s_qf_names[qtype] &&
+                           strcmp(sbi->s_qf_names[qtype], qname)) {
+                               printk(KERN_ERR
+                                       "EXT4-fs: %s quota file already "
+                                       "specified.\n", QTYPE2NAME(qtype));
+                               kfree(qname);
+                               return 0;
+                       }
+                       sbi->s_qf_names[qtype] = qname;
+                       if (strchr(sbi->s_qf_names[qtype], '/')) {
+                               printk(KERN_ERR
+                                       "EXT4-fs: quotafile must be on "
+                                       "filesystem root.\n");
+                               kfree(sbi->s_qf_names[qtype]);
+                               sbi->s_qf_names[qtype] = NULL;
+                               return 0;
+                       }
+                       set_opt(sbi->s_mount_opt, QUOTA);
+                       break;
+               case Opt_offusrjquota:
+                       qtype = USRQUOTA;
+                       goto clear_qf_name;
+               case Opt_offgrpjquota:
+                       qtype = GRPQUOTA;
+clear_qf_name:
+                       if (sb_any_quota_enabled(sb)) {
+                               printk(KERN_ERR "EXT4-fs: Cannot change "
+                                       "journalled quota options when "
+                                       "quota turned on.\n");
+                               return 0;
+                       }
+                       /*
+                        * The space will be released later when all options
+                        * are confirmed to be correct
+                        */
+                       sbi->s_qf_names[qtype] = NULL;
+                       break;
+               case Opt_jqfmt_vfsold:
+                       sbi->s_jquota_fmt = QFMT_VFS_OLD;
+                       break;
+               case Opt_jqfmt_vfsv0:
+                       sbi->s_jquota_fmt = QFMT_VFS_V0;
+                       break;
+               case Opt_quota:
+               case Opt_usrquota:
+                       set_opt(sbi->s_mount_opt, QUOTA);
+                       set_opt(sbi->s_mount_opt, USRQUOTA);
+                       break;
+               case Opt_grpquota:
+                       set_opt(sbi->s_mount_opt, QUOTA);
+                       set_opt(sbi->s_mount_opt, GRPQUOTA);
+                       break;
+               case Opt_noquota:
+                       if (sb_any_quota_enabled(sb)) {
+                               printk(KERN_ERR "EXT4-fs: Cannot change quota "
+                                       "options when quota turned on.\n");
+                               return 0;
+                       }
+                       clear_opt(sbi->s_mount_opt, QUOTA);
+                       clear_opt(sbi->s_mount_opt, USRQUOTA);
+                       clear_opt(sbi->s_mount_opt, GRPQUOTA);
+                       break;
+#else
+               case Opt_quota:
+               case Opt_usrquota:
+               case Opt_grpquota:
+               case Opt_usrjquota:
+               case Opt_grpjquota:
+               case Opt_offusrjquota:
+               case Opt_offgrpjquota:
+               case Opt_jqfmt_vfsold:
+               case Opt_jqfmt_vfsv0:
+                       printk(KERN_ERR
+                               "EXT4-fs: journalled quota options not "
+                               "supported.\n");
+                       break;
+               case Opt_noquota:
+                       break;
+#endif
+               case Opt_abort:
+                       set_opt(sbi->s_mount_opt, ABORT);
+                       break;
+               case Opt_barrier:
+                       if (match_int(&args[0], &option))
+                               return 0;
+                       if (option)
+                               set_opt(sbi->s_mount_opt, BARRIER);
+                       else
+                               clear_opt(sbi->s_mount_opt, BARRIER);
+                       break;
+               case Opt_ignore:
+                       break;
+               case Opt_resize:
+                       if (!is_remount) {
+                               printk(KERN_ERR "EXT4-fs: resize option only "
+                                       "available for remount\n");
+                               return 0;
+                       }
+                       if (match_int(&args[0], &option) != 0)
+                               return 0;
+                       *n_blocks_count = option;
+                       break;
+               case Opt_nobh:
+                       set_opt(sbi->s_mount_opt, NOBH);
+                       break;
+               case Opt_bh:
+                       clear_opt(sbi->s_mount_opt, NOBH);
+                       break;
+               case Opt_extents:
+                       set_opt (sbi->s_mount_opt, EXTENTS);
+                       break;
+               default:
+                       printk (KERN_ERR
+                               "EXT4-fs: Unrecognized mount option \"%s\" "
+                               "or missing value\n", p);
+                       return 0;
+               }
+       }
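+       /*
+        * Cross-check the quota options: a journalled quota file overrides
+        * the plain usrquota/grpquota flag for the same type, mixing the
+        * old flags with a journalled file of the other type is an error,
+        * and jqfmt= must be given exactly when a quota file is named.
+        */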
+#ifdef CONFIG_QUOTA
+       if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+               if ((sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA) &&
+                    sbi->s_qf_names[USRQUOTA])
+                       clear_opt(sbi->s_mount_opt, USRQUOTA);
+
+               if ((sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA) &&
+                    sbi->s_qf_names[GRPQUOTA])
+                       clear_opt(sbi->s_mount_opt, GRPQUOTA);
+
+               if ((sbi->s_qf_names[USRQUOTA] &&
+                               (sbi->s_mount_opt & EXT4_MOUNT_GRPQUOTA)) ||
+                   (sbi->s_qf_names[GRPQUOTA] &&
+                               (sbi->s_mount_opt & EXT4_MOUNT_USRQUOTA))) {
+                       printk(KERN_ERR "EXT4-fs: old and new quota "
+                                       "format mixing.\n");
+                       return 0;
+               }
+
+               if (!sbi->s_jquota_fmt) {
+                       printk(KERN_ERR "EXT4-fs: journalled quota format "
+                                       "not specified.\n");
+                       return 0;
+               }
+       } else {
+               if (sbi->s_jquota_fmt) {
+                       printk(KERN_ERR "EXT4-fs: journalled quota format "
+                                       "specified with no journalling "
+                                       "enabled.\n");
+                       return 0;
+               }
+       }
+#endif
+       return 1;
+}
+
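+/*
+ * Final mount-time superblock fixups: warn if e2fsck is advisable, and on
+ * a read-write mount bump the mount count and mtime, set the RECOVER
+ * incompat flag for the duration of the mount, and write the superblock
+ * back out.  Returns MS_RDONLY if the revision level is too new.
+ */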
+static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
+                           int read_only)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int res = 0;
+
+       if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
+               printk (KERN_ERR "EXT4-fs warning: revision level too high, "
+                       "forcing read-only mode\n");
+               res = MS_RDONLY;
+       }
+       if (read_only)
+               return res;
+       if (!(sbi->s_mount_state & EXT4_VALID_FS))
+               printk (KERN_WARNING "EXT4-fs warning: mounting unchecked fs, "
+                       "running e2fsck is recommended\n");
+       else if ((sbi->s_mount_state & EXT4_ERROR_FS))
+               printk (KERN_WARNING
+                       "EXT4-fs warning: mounting fs with errors, "
+                       "running e2fsck is recommended\n");
+       else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
+                le16_to_cpu(es->s_mnt_count) >=
+                (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
+               printk (KERN_WARNING
+                       "EXT4-fs warning: maximal mount count reached, "
+                       "running e2fsck is recommended\n");
+       else if (le32_to_cpu(es->s_checkinterval) &&
+               (le32_to_cpu(es->s_lastcheck) +
+                       le32_to_cpu(es->s_checkinterval) <= get_seconds()))
+               printk (KERN_WARNING
+                       "EXT4-fs warning: checktime reached, "
+                       "running e2fsck is recommended\n");
+#if 0
+               /* @@@ We _will_ want to clear the valid bit if we find
+                * inconsistencies, to force a fsck at reboot.  But for
+                * a plain journaled filesystem we can keep it set as
+                * valid forever! :)
+                */
+       es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) & ~EXT4_VALID_FS);
+#endif
+       if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
+               es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
+       es->s_mnt_count = cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+       es->s_mtime = cpu_to_le32(get_seconds());
+       ext4_update_dynamic_rev(sb);
+       EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+
+       ext4_commit_super(sb, es, 1);
+       if (test_opt(sb, DEBUG))
+               printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%lu, "
+                               "bpg=%lu, ipg=%lu, mo=%04lx]\n",
+                       sb->s_blocksize,
+                       sbi->s_groups_count,
+                       EXT4_BLOCKS_PER_GROUP(sb),
+                       EXT4_INODES_PER_GROUP(sb),
+                       sbi->s_mount_opt);
+
+       printk(KERN_INFO "EXT4 FS on %s, ", sb->s_id);
+       if (EXT4_SB(sb)->s_journal->j_inode == NULL) {
+               char b[BDEVNAME_SIZE];
+
+               printk("external journal on %s\n",
+                       bdevname(EXT4_SB(sb)->s_journal->j_dev, b));
+       } else {
+               printk("internal journal\n");
+       }
+       return res;
+}
+
+/* Called at mount-time, super-block is locked */
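+/*
+ * Verify that every group's block bitmap, inode bitmap and inode table
+ * lie within the group itself, then recompute the free block and free
+ * inode counts from scratch.
+ */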
+static int ext4_check_descriptors (struct super_block * sb)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
+       ext4_fsblk_t last_block;
+       ext4_fsblk_t block_bitmap;
+       ext4_fsblk_t inode_bitmap;
+       ext4_fsblk_t inode_table;
+       struct ext4_group_desc * gdp = NULL;
+       int desc_block = 0;
+       int i;
+
+       ext4_debug ("Checking group descriptors");
+
+       for (i = 0; i < sbi->s_groups_count; i++) {
+               if (i == sbi->s_groups_count - 1)
+                       last_block = ext4_blocks_count(sbi->s_es) - 1;
+               else
+                       last_block = first_block +
+                               (EXT4_BLOCKS_PER_GROUP(sb) - 1);
+
+               if ((i % EXT4_DESC_PER_BLOCK(sb)) == 0)
+                       gdp = (struct ext4_group_desc *)
+                                       sbi->s_group_desc[desc_block++]->b_data;
+               block_bitmap = ext4_block_bitmap(sb, gdp);
+               if (block_bitmap < first_block || block_bitmap > last_block) {
+                       ext4_error (sb, "ext4_check_descriptors",
+                                   "Block bitmap for group %d"
+                                   " not in group (block %llu)!",
+                                   i, block_bitmap);
+                       return 0;
+               }
+               inode_bitmap = ext4_inode_bitmap(sb, gdp);
+               if (inode_bitmap < first_block || inode_bitmap > last_block) {
+                       ext4_error (sb, "ext4_check_descriptors",
+                                   "Inode bitmap for group %d"
+                                   " not in group (block %llu)!",
+                                   i, inode_bitmap);
+                       return 0;
+               }
+               inode_table = ext4_inode_table(sb, gdp);
+               if (inode_table < first_block ||
+                   inode_table + sbi->s_itb_per_group > last_block) {
+                       ext4_error (sb, "ext4_check_descriptors",
+                                   "Inode table for group %d"
+                                   " not in group (block %llu)!",
+                                   i, inode_table);
+                       return 0;
+               }
+               first_block += EXT4_BLOCKS_PER_GROUP(sb);
+               gdp = (struct ext4_group_desc *)
+                       ((__u8 *)gdp + EXT4_DESC_SIZE(sb));
+       }
+
+       ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
+       sbi->s_es->s_free_inodes_count =
+               cpu_to_le32(ext4_count_free_inodes(sb));
+       return 1;
+}
+
+
+/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
+ * the superblock) which were deleted from all directories, but held open by
+ * a process at the time of a crash.  We walk the list and try to delete these
+ * inodes at recovery time (only with a read-write filesystem).
+ *
+ * In order to keep the orphan inode chain consistent during traversal (in
+ * case of crash during recovery), we link each inode into the superblock
+ * orphan list_head and handle it the same way as an inode deletion during
+ * normal operation (which journals the operations for us).
+ *
+ * We only do an iget() and an iput() on each inode, which is very safe if we
+ * accidentally point at an in-use or already deleted inode.  The worst that
+ * can happen in this case is that we get a "bit already cleared" message from
+ * ext4_free_inode().  The only reason we would point at a wrong inode is if
+ * e2fsck was run on this filesystem, and it must have already done the orphan
+ * inode cleanup for us, so we can safely abort without any further action.
+ */
+static void ext4_orphan_cleanup (struct super_block * sb,
+                                struct ext4_super_block * es)
+{
+       unsigned int s_flags = sb->s_flags;
+       int nr_orphans = 0, nr_truncates = 0;
+#ifdef CONFIG_QUOTA
+       int i;
+#endif
+       if (!es->s_last_orphan) {
+               jbd_debug(4, "no orphan inodes to clean up\n");
+               return;
+       }
+
+       if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+               if (es->s_last_orphan)
+                       jbd_debug(1, "Errors on filesystem, "
+                                 "clearing orphan list.\n");
+               es->s_last_orphan = 0;
+               jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+               return;
+       }
+
+       if (s_flags & MS_RDONLY) {
+               printk(KERN_INFO "EXT4-fs: %s: orphan cleanup on readonly fs\n",
+                      sb->s_id);
+               sb->s_flags &= ~MS_RDONLY;
+       }
+#ifdef CONFIG_QUOTA
+       /* Needed for iput() to work correctly and not trash data */
+       sb->s_flags |= MS_ACTIVE;
+       /* Turn on quotas so that they are updated correctly */
+       for (i = 0; i < MAXQUOTAS; i++) {
+               if (EXT4_SB(sb)->s_qf_names[i]) {
+                       int ret = ext4_quota_on_mount(sb, i);
+                       if (ret < 0)
+                               printk(KERN_ERR
+                                       "EXT4-fs: Cannot turn on journalled "
+                                       "quota: error %d\n", ret);
+               }
+       }
+#endif
+
+       while (es->s_last_orphan) {
+               struct inode *inode;
+
+               if (!(inode =
+                     ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) {
+                       es->s_last_orphan = 0;
+                       break;
+               }
+
+               list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
+               DQUOT_INIT(inode);
+               if (inode->i_nlink) {
+                       printk(KERN_DEBUG
+                               "%s: truncating inode %lu to %Ld bytes\n",
+                               __FUNCTION__, inode->i_ino, inode->i_size);
+                       jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
+                                 inode->i_ino, inode->i_size);
+                       ext4_truncate(inode);
+                       nr_truncates++;
+               } else {
+                       printk(KERN_DEBUG
+                               "%s: deleting unreferenced inode %lu\n",
+                               __FUNCTION__, inode->i_ino);
+                       jbd_debug(2, "deleting unreferenced inode %lu\n",
+                                 inode->i_ino);
+                       nr_orphans++;
+               }
+               iput(inode);  /* The delete magic happens here! */
+       }
+
+#define PLURAL(x) (x), ((x)==1) ? "" : "s"
+
+       if (nr_orphans)
+               printk(KERN_INFO "EXT4-fs: %s: %d orphan inode%s deleted\n",
+                      sb->s_id, PLURAL(nr_orphans));
+       if (nr_truncates)
+               printk(KERN_INFO "EXT4-fs: %s: %d truncate%s cleaned up\n",
+                      sb->s_id, PLURAL(nr_truncates));
+#ifdef CONFIG_QUOTA
+       /* Turn quotas off */
+       for (i = 0; i < MAXQUOTAS; i++) {
+               if (sb_dqopt(sb)->files[i])
+                       vfs_quota_off(sb, i);
+       }
+#endif
+       sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+}
+
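+/*
+ * Valid only when n is a power of two: ffz(~(n)) is the index of the
+ * lowest set bit of n.
+ */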
+#define log2(n) ffz(~(n))
+
+/*
+ * Maximal file size.  There is a direct, and {,double-,triple-}indirect
+ * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
+ * We need to be 1 filesystem block less than the 2^32 sector limit.
+ */
+static loff_t ext4_max_size(int bits)
+{
+       loff_t res = EXT4_NDIR_BLOCKS;
+       /* This constant is calculated to be the largest file size for a
+        * dense, 4k-blocksize file such that the total number of
+        * sectors in the file, including data and all indirect blocks,
+        * does not exceed 2^32. */
+       const loff_t upper_limit = 0x1ff7fffd000LL;
+
+       res += 1LL << (bits-2);
+       res += 1LL << (2*(bits-2));
+       res += 1LL << (3*(bits-2));
+       res <<= bits;
+       if (res > upper_limit)
+               res = upper_limit;
+       return res;
+}
+
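+/*
+ * Return the disk block holding group-descriptor block @nr.  Without the
+ * META_BG feature (or for blocks below s_first_meta_bg) the descriptors
+ * sit right after the superblock; with it, each descriptor block lives in
+ * its own group, following that group's backup superblock if present.
+ */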
+static ext4_fsblk_t descriptor_loc(struct super_block *sb,
+                               ext4_fsblk_t logical_sb_block, int nr)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       unsigned long bg, first_meta_bg;
+       int has_super = 0;
+
+       first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
+
+       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
+           nr < first_meta_bg)
+               return logical_sb_block + nr + 1;
+       bg = sbi->s_desc_per_block * nr;
+       if (ext4_bg_has_super(sb, bg))
+               has_super = 1;
+       return (has_super + ext4_group_first_block_no(sb, bg));
+}
+
+
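+/*
+ * The main mount path: read and validate the superblock (re-reading it
+ * once the real block size is known), load the group descriptors, set up
+ * the per-filesystem counters and the reservation tree, load or create
+ * the journal, then read the root inode and run orphan cleanup.
+ */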
+static int ext4_fill_super (struct super_block *sb, void *data, int silent)
+{
+       struct buffer_head * bh;
+       struct ext4_super_block *es = NULL;
+       struct ext4_sb_info *sbi;
+       ext4_fsblk_t block;
+       ext4_fsblk_t sb_block = get_sb_block(&data);
+       ext4_fsblk_t logical_sb_block;
+       unsigned long offset = 0;
+       unsigned int journal_inum = 0;
+       unsigned long journal_devnum = 0;
+       unsigned long def_mount_opts;
+       struct inode *root;
+       int blocksize;
+       int hblock;
+       int db_count;
+       int i;
+       int needs_recovery;
+       __le32 features;
+       __u64 blocks_count;
+
+       sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+       if (!sbi)
+               return -ENOMEM;
+       sb->s_fs_info = sbi;
+       sbi->s_mount_opt = 0;
+       sbi->s_resuid = EXT4_DEF_RESUID;
+       sbi->s_resgid = EXT4_DEF_RESGID;
+
+       unlock_kernel();
+
+       blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
+       if (!blocksize) {
+               printk(KERN_ERR "EXT4-fs: unable to set blocksize\n");
+               goto out_fail;
+       }
+
+       /*
+        * The ext4 superblock is only buffer-aligned when the block size is
+        * 1kB; for larger block sizes we must calculate the superblock's
+        * offset from the start of the buffer.
+        */
+       if (blocksize != EXT4_MIN_BLOCK_SIZE) {
+               logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
+               offset = do_div(logical_sb_block, blocksize);
+       } else {
+               logical_sb_block = sb_block;
+       }
+
+       if (!(bh = sb_bread(sb, logical_sb_block))) {
+               printk (KERN_ERR "EXT4-fs: unable to read superblock\n");
+               goto out_fail;
+       }
+       /*
+        * Note: s_es must be initialized as soon as possible because
+        *       some ext4 macros depend on its value
+        */
+       es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+       sbi->s_es = es;
+       sb->s_magic = le16_to_cpu(es->s_magic);
+       if (sb->s_magic != EXT4_SUPER_MAGIC)
+               goto cantfind_ext4;
+
+       /* Set defaults before we parse the mount options */
+       def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+       if (def_mount_opts & EXT4_DEFM_DEBUG)
+               set_opt(sbi->s_mount_opt, DEBUG);
+       if (def_mount_opts & EXT4_DEFM_BSDGROUPS)
+               set_opt(sbi->s_mount_opt, GRPID);
+       if (def_mount_opts & EXT4_DEFM_UID16)
+               set_opt(sbi->s_mount_opt, NO_UID32);
+       if (def_mount_opts & EXT4_DEFM_XATTR_USER)
+               set_opt(sbi->s_mount_opt, XATTR_USER);
+       if (def_mount_opts & EXT4_DEFM_ACL)
+               set_opt(sbi->s_mount_opt, POSIX_ACL);
+       if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
+               sbi->s_mount_opt |= EXT4_MOUNT_JOURNAL_DATA;
+       else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
+               sbi->s_mount_opt |= EXT4_MOUNT_ORDERED_DATA;
+       else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
+               sbi->s_mount_opt |= EXT4_MOUNT_WRITEBACK_DATA;
+
+       if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
+               set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+       else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_RO)
+               set_opt(sbi->s_mount_opt, ERRORS_RO);
+       else
+               set_opt(sbi->s_mount_opt, ERRORS_CONT);
+
+       sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
+       sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
+
+       set_opt(sbi->s_mount_opt, RESERVATION);
+
+       if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
+                           NULL, 0))
+               goto failed_mount;
+
+       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+               ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+
+       if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
+           (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
+            EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
+            EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U)))
+               printk(KERN_WARNING
+                      "EXT4-fs warning: feature flags set on rev 0 fs, "
+                      "running e2fsck is recommended\n");
+       /*
+        * Check feature flags regardless of the revision level, since we
+        * previously didn't change the revision level when setting the flags,
+        * so there is a chance incompat flags are set on a rev 0 filesystem.
+        */
+       features = EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP);
+       if (features) {
+               printk(KERN_ERR "EXT4-fs: %s: couldn't mount because of "
+                      "unsupported optional features (%x).\n",
+                      sb->s_id, le32_to_cpu(features));
+               goto failed_mount;
+       }
+       features = EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP);
+       if (!(sb->s_flags & MS_RDONLY) && features) {
+               printk(KERN_ERR "EXT4-fs: %s: couldn't mount RDWR because of "
+                      "unsupported optional features (%x).\n",
+                      sb->s_id, le32_to_cpu(features));
+               goto failed_mount;
+       }
+       blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+
+       if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+           blocksize > EXT4_MAX_BLOCK_SIZE) {
+               printk(KERN_ERR
+                      "EXT4-fs: Unsupported filesystem blocksize %d on %s.\n",
+                      blocksize, sb->s_id);
+               goto failed_mount;
+       }
+
+       hblock = bdev_hardsect_size(sb->s_bdev);
+       if (sb->s_blocksize != blocksize) {
+               /*
+                * Make sure the filesystem block size is at least as large
+                * as the hardware sector size for the device.
+                */
+               if (blocksize < hblock) {
+                       printk(KERN_ERR "EXT4-fs: blocksize %d too small for "
+                              "device blocksize %d.\n", blocksize, hblock);
+                       goto failed_mount;
+               }
+
+               brelse (bh);
+               sb_set_blocksize(sb, blocksize);
+               logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
+               offset = do_div(logical_sb_block, blocksize);
+               bh = sb_bread(sb, logical_sb_block);
+               if (!bh) {
+                       printk(KERN_ERR
+                              "EXT4-fs: Can't read superblock on 2nd try.\n");
+                       goto failed_mount;
+               }
+               es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
+               sbi->s_es = es;
+               if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
+                       printk (KERN_ERR
+                               "EXT4-fs: Magic mismatch, very weird!\n");
+                       goto failed_mount;
+               }
+       }
+
+       sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits);
+
+       if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
+               sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
+               sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
+       } else {
+               sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
+               sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+               if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
+                   (sbi->s_inode_size & (sbi->s_inode_size - 1)) ||
+                   (sbi->s_inode_size > blocksize)) {
+                       printk (KERN_ERR
+                               "EXT4-fs: unsupported inode size: %d\n",
+                               sbi->s_inode_size);
+                       goto failed_mount;
+               }
+       }
+       sbi->s_frag_size = EXT4_MIN_FRAG_SIZE <<
+                                  le32_to_cpu(es->s_log_frag_size);
+       if (blocksize != sbi->s_frag_size) {
+               printk(KERN_ERR
+                      "EXT4-fs: fragsize %lu != blocksize %u (unsupported)\n",
+                      sbi->s_frag_size, blocksize);
+               goto failed_mount;
+       }
+       sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) {
+               if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
+                   sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
+                   sbi->s_desc_size & (sbi->s_desc_size - 1)) {
+                       printk(KERN_ERR
+                              "EXT4-fs: unsupported descriptor size %lu\n",
+                              sbi->s_desc_size);
+                       goto failed_mount;
+               }
+       } else
+               sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
+       sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+       sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
+       sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+       if (EXT4_INODE_SIZE(sb) == 0)
+               goto cantfind_ext4;
+       sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
+       if (sbi->s_inodes_per_block == 0)
+               goto cantfind_ext4;
+       sbi->s_itb_per_group = sbi->s_inodes_per_group /
+                                       sbi->s_inodes_per_block;
+       sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
+       sbi->s_sbh = bh;
+       sbi->s_mount_state = le16_to_cpu(es->s_state);
+       sbi->s_addr_per_block_bits = log2(EXT4_ADDR_PER_BLOCK(sb));
+       sbi->s_desc_per_block_bits = log2(EXT4_DESC_PER_BLOCK(sb));
+       for (i = 0; i < 4; i++)
+               sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
+       sbi->s_def_hash_version = es->s_def_hash_version;
+
+       if (sbi->s_blocks_per_group > blocksize * 8) {
+               printk (KERN_ERR
+                       "EXT4-fs: #blocks per group too big: %lu\n",
+                       sbi->s_blocks_per_group);
+               goto failed_mount;
+       }
+       if (sbi->s_frags_per_group > blocksize * 8) {
+               printk (KERN_ERR
+                       "EXT4-fs: #fragments per group too big: %lu\n",
+                       sbi->s_frags_per_group);
+               goto failed_mount;
+       }
+       if (sbi->s_inodes_per_group > blocksize * 8) {
+               printk (KERN_ERR
+                       "EXT4-fs: #inodes per group too big: %lu\n",
+                       sbi->s_inodes_per_group);
+               goto failed_mount;
+       }
+
+       if (ext4_blocks_count(es) >
+                   (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
+               printk(KERN_ERR "EXT4-fs: filesystem on %s:"
+                       " too large to mount safely\n", sb->s_id);
+               if (sizeof(sector_t) < 8)
+                       printk(KERN_WARNING "EXT4-fs: CONFIG_LBD not "
+                                       "enabled\n");
+               goto failed_mount;
+       }
+
+       if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
+               goto cantfind_ext4;
+       blocks_count = (ext4_blocks_count(es) -
+                       le32_to_cpu(es->s_first_data_block) +
+                       EXT4_BLOCKS_PER_GROUP(sb) - 1);
+       do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
+       sbi->s_groups_count = blocks_count;
+       db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+                  EXT4_DESC_PER_BLOCK(sb);
+       sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
+                                   GFP_KERNEL);
+       if (sbi->s_group_desc == NULL) {
+               printk (KERN_ERR "EXT4-fs: not enough memory\n");
+               goto failed_mount;
+       }
+
+       bgl_lock_init(&sbi->s_blockgroup_lock);
+
+       for (i = 0; i < db_count; i++) {
+               block = descriptor_loc(sb, logical_sb_block, i);
+               sbi->s_group_desc[i] = sb_bread(sb, block);
+               if (!sbi->s_group_desc[i]) {
+                       printk (KERN_ERR "EXT4-fs: "
+                               "can't read group descriptor %d\n", i);
+                       db_count = i;
+                       goto failed_mount2;
+               }
+       }
+       if (!ext4_check_descriptors (sb)) {
+               printk(KERN_ERR "EXT4-fs: group descriptors corrupted!\n");
+               goto failed_mount2;
+       }
+       sbi->s_gdb_count = db_count;
+       get_random_bytes(&sbi->s_next_generation, sizeof(u32));
+       spin_lock_init(&sbi->s_next_gen_lock);
+
+       percpu_counter_init(&sbi->s_freeblocks_counter,
+               ext4_count_free_blocks(sb));
+       percpu_counter_init(&sbi->s_freeinodes_counter,
+               ext4_count_free_inodes(sb));
+       percpu_counter_init(&sbi->s_dirs_counter,
+               ext4_count_dirs(sb));
+
+       /* per-filesystem reservation list head & lock */
+       spin_lock_init(&sbi->s_rsv_window_lock);
+       sbi->s_rsv_window_root = RB_ROOT;
+       /* Add a single, static dummy reservation to the start of the
+        * reservation window list --- it gives us a placeholder for
+        * append-at-start-of-list which makes the allocation logic
+        * _much_ simpler. */
+       sbi->s_rsv_window_head.rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+       sbi->s_rsv_window_head.rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
+       sbi->s_rsv_window_head.rsv_alloc_hit = 0;
+       sbi->s_rsv_window_head.rsv_goal_size = 0;
+       ext4_rsv_window_add(sb, &sbi->s_rsv_window_head);
+
+       /*
+        * set up enough so that it can read an inode
+        */
+       sb->s_op = &ext4_sops;
+       sb->s_export_op = &ext4_export_ops;
+       sb->s_xattr = ext4_xattr_handlers;
+#ifdef CONFIG_QUOTA
+       sb->s_qcop = &ext4_qctl_operations;
+       sb->dq_op = &ext4_quota_operations;
+#endif
+       INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
+
+       sb->s_root = NULL;
+
+       needs_recovery = (es->s_last_orphan != 0 ||
+                         EXT4_HAS_INCOMPAT_FEATURE(sb,
+                                   EXT4_FEATURE_INCOMPAT_RECOVER));
+
+       /*
+        * The first inode we look at is the journal inode.  Don't try
+        * root first: it may be modified in the journal!
+        */
+       if (!test_opt(sb, NOLOAD) &&
+           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
+               if (ext4_load_journal(sb, es, journal_devnum))
+                       goto failed_mount3;
+       } else if (journal_inum) {
+               if (ext4_create_journal(sb, es, journal_inum))
+                       goto failed_mount3;
+       } else {
+               if (!silent)
+                       printk (KERN_ERR
+                               "ext4: No journal on filesystem on %s\n",
+                               sb->s_id);
+               goto failed_mount3;
+       }
+
+       /* We have now updated the journal if required, so we can
+        * validate the data journaling mode. */
+       switch (test_opt(sb, DATA_FLAGS)) {
+       case 0:
+               /* No mode set, assume a default based on the journal
+                * capabilities: ORDERED_DATA if the journal can
+                * cope, else JOURNAL_DATA
+                */
+               if (jbd2_journal_check_available_features
+                   (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
+                       set_opt(sbi->s_mount_opt, ORDERED_DATA);
+               else
+                       set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+               break;
+
+       case EXT4_MOUNT_ORDERED_DATA:
+       case EXT4_MOUNT_WRITEBACK_DATA:
+               if (!jbd2_journal_check_available_features
+                   (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
+                       printk(KERN_ERR "EXT4-fs: Journal does not support "
+                              "requested data journaling mode\n");
+                       goto failed_mount4;
+               }
+               break;
+       default:
+               break;
+       }
+
+       if (test_opt(sb, NOBH)) {
+               if (test_opt(sb, DATA_FLAGS) != EXT4_MOUNT_WRITEBACK_DATA) {
+                       printk(KERN_WARNING "EXT4-fs: Ignoring nobh option - "
+                               "it's supported only with writeback mode\n");
+                       clear_opt(sbi->s_mount_opt, NOBH);
+               }
+       }
+       /*
+        * The jbd2_journal_load will have done any necessary log recovery,
+        * so we can safely mount the rest of the filesystem now.
+        */
+
+       root = iget(sb, EXT4_ROOT_INO);
+       sb->s_root = d_alloc_root(root);
+       if (!sb->s_root) {
+               printk(KERN_ERR "EXT4-fs: get root inode failed\n");
+               iput(root);
+               goto failed_mount4;
+       }
+       if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+               dput(sb->s_root);
+               sb->s_root = NULL;
+               printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n");
+               goto failed_mount4;
+       }
+
+       ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+       /*
+        * akpm: core read_super() calls in here with the superblock locked.
+        * That deadlocks, because orphan cleanup needs to lock the superblock
+        * in numerous places.  Here we just pop the lock - it's relatively
+        * harmless, because we are now ready to accept write_super() requests,
+        * and aviro says that's the only reason for hanging onto the
+        * superblock lock.
+        */
+       EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
+       ext4_orphan_cleanup(sb, es);
+       EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
+       if (needs_recovery)
+               printk (KERN_INFO "EXT4-fs: recovery complete.\n");
+       ext4_mark_recovery_complete(sb, es);
+       printk (KERN_INFO "EXT4-fs: mounted filesystem with %s data mode.\n",
+               test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ? "journal":
+               test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA ? "ordered":
+               "writeback");
+
+       ext4_ext_init(sb);
+
+       lock_kernel();
+       return 0;
+
+cantfind_ext4:
+       if (!silent)
+               printk(KERN_ERR "VFS: Can't find ext4 filesystem on dev %s.\n",
+                      sb->s_id);
+       goto failed_mount;
+
+failed_mount4:
+       jbd2_journal_destroy(sbi->s_journal);
+failed_mount3:
+       percpu_counter_destroy(&sbi->s_freeblocks_counter);
+       percpu_counter_destroy(&sbi->s_freeinodes_counter);
+       percpu_counter_destroy(&sbi->s_dirs_counter);
+failed_mount2:
+       for (i = 0; i < db_count; i++)
+               brelse(sbi->s_group_desc[i]);
+       kfree(sbi->s_group_desc);
+failed_mount:
+#ifdef CONFIG_QUOTA
+       for (i = 0; i < MAXQUOTAS; i++)
+               kfree(sbi->s_qf_names[i]);
+#endif
+       ext4_blkdev_remove(sbi);
+       brelse(bh);
+out_fail:
+       sb->s_fs_info = NULL;
+       kfree(sbi);
+       lock_kernel();
+       return -EINVAL;
+}
+
+/*
+ * Setup any per-fs journal parameters now.  We'll do this both on
+ * initial mount, once the journal has been initialised but before we've
+ * done any recovery; and again on any subsequent remount.
+ */
+static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (sbi->s_commit_interval)
+               journal->j_commit_interval = sbi->s_commit_interval;
+       /* We could also set up an ext4-specific default for the commit
+        * interval here, but for now we'll just fall back to the jbd
+        * default. */
+
+       spin_lock(&journal->j_state_lock);
+       if (test_opt(sb, BARRIER))
+               journal->j_flags |= JBD2_BARRIER;
+       else
+               journal->j_flags &= ~JBD2_BARRIER;
+       spin_unlock(&journal->j_state_lock);
+}
+
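+/*
+ * Open a journal kept in a regular inode of this filesystem and wrap it
+ * in a jbd2 journal_t, rejecting deleted, bad or non-regular inodes.
+ */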
+static journal_t *ext4_get_journal(struct super_block *sb,
+                                  unsigned int journal_inum)
+{
+       struct inode *journal_inode;
+       journal_t *journal;
+
+       /* First, test for the existence of a valid inode on disk.  Bad
+        * things happen if we iget() an unused inode, as the subsequent
+        * iput() will try to delete it. */
+
+       journal_inode = iget(sb, journal_inum);
+       if (!journal_inode) {
+               printk(KERN_ERR "EXT4-fs: no journal found.\n");
+               return NULL;
+       }
+       if (!journal_inode->i_nlink) {
+               make_bad_inode(journal_inode);
+               iput(journal_inode);
+               printk(KERN_ERR "EXT4-fs: journal inode is deleted.\n");
+               return NULL;
+       }
+
+       jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
+                 journal_inode, journal_inode->i_size);
+       if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) {
+               printk(KERN_ERR "EXT4-fs: invalid journal inode.\n");
+               iput(journal_inode);
+               return NULL;
+       }
+
+       journal = jbd2_journal_init_inode(journal_inode);
+       if (!journal) {
+               printk(KERN_ERR "EXT4-fs: Could not load journal inode\n");
+               iput(journal_inode);
+               return NULL;
+       }
+       journal->j_private = sb;
+       ext4_init_journal_params(sb, journal);
+       return journal;
+}
+
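+/*
+ * Open a journal kept on an external block device: claim the device,
+ * read its superblock, and check the JOURNAL_DEV incompat flag, the UUID
+ * recorded in the filesystem's superblock, and that the journal has
+ * exactly one user.
+ */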
+static journal_t *ext4_get_dev_journal(struct super_block *sb,
+                                      dev_t j_dev)
+{
+       struct buffer_head * bh;
+       journal_t *journal;
+       ext4_fsblk_t start;
+       ext4_fsblk_t len;
+       int hblock, blocksize;
+       ext4_fsblk_t sb_block;
+       unsigned long offset;
+       struct ext4_super_block * es;
+       struct block_device *bdev;
+
+       bdev = ext4_blkdev_get(j_dev);
+       if (bdev == NULL)
+               return NULL;
+
+       if (bd_claim(bdev, sb)) {
+               printk(KERN_ERR
+                       "EXT4: failed to claim external journal device.\n");
+               blkdev_put(bdev);
+               return NULL;
+       }
+
+       blocksize = sb->s_blocksize;
+       hblock = bdev_hardsect_size(bdev);
+       if (blocksize < hblock) {
+               printk(KERN_ERR
+                       "EXT4-fs: blocksize too small for journal device.\n");
+               goto out_bdev;
+       }
+
+       sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
+       offset = EXT4_MIN_BLOCK_SIZE % blocksize;
+       set_blocksize(bdev, blocksize);
+       if (!(bh = __bread(bdev, sb_block, blocksize))) {
+               printk(KERN_ERR "EXT4-fs: couldn't read superblock of "
+                      "external journal\n");
+               goto out_bdev;
+       }
+
+       es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
+       if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
+           !(le32_to_cpu(es->s_feature_incompat) &
+             EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
+               printk(KERN_ERR "EXT4-fs: external journal has "
+                                       "bad superblock\n");
+               brelse(bh);
+               goto out_bdev;
+       }
+
+       if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
+               printk(KERN_ERR "EXT4-fs: journal UUID does not match\n");
+               brelse(bh);
+               goto out_bdev;
+       }
+
+       len = ext4_blocks_count(es);
+       start = sb_block + 1;
+       brelse(bh);     /* we're done with the superblock */
+
+       journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
+                                       start, len, blocksize);
+       if (!journal) {
+               printk(KERN_ERR "EXT4-fs: failed to create device journal\n");
+               goto out_bdev;
+       }
+       journal->j_private = sb;
+       ll_rw_block(READ, 1, &journal->j_sb_buffer);
+       wait_on_buffer(journal->j_sb_buffer);
+       if (!buffer_uptodate(journal->j_sb_buffer)) {
+               printk(KERN_ERR "EXT4-fs: I/O error on journal device\n");
+               goto out_journal;
+       }
+       if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
+               printk(KERN_ERR "EXT4-fs: External journal has more than one "
+                                       "user (unsupported) - %d\n",
+                       be32_to_cpu(journal->j_superblock->s_nr_users));
+               goto out_journal;
+       }
+       EXT4_SB(sb)->journal_bdev = bdev;
+       ext4_init_journal_params(sb, journal);
+       return journal;
+out_journal:
+       jbd2_journal_destroy(journal);
+out_bdev:
+       ext4_blkdev_put(bdev);
+       return NULL;
+}
+
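+/*
+ * Locate and load the journal named in the superblock (or overridden via
+ * journal_dev=): refuse recovery on a truly read-only device, wipe the
+ * journal when no recovery is needed, and write a changed external
+ * journal device number back to the superblock.
+ */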
+static int ext4_load_journal(struct super_block *sb,
+                            struct ext4_super_block *es,
+                            unsigned long journal_devnum)
+{
+       journal_t *journal;
+       unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
+       dev_t journal_dev;
+       int err = 0;
+       int really_read_only;
+
+       if (journal_devnum &&
+           journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+               printk(KERN_INFO "EXT4-fs: external journal device major/minor "
+                       "numbers have changed\n");
+               journal_dev = new_decode_dev(journal_devnum);
+       } else
+               journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
+
+       really_read_only = bdev_read_only(sb->s_bdev);
+
+       /*
+        * Are we loading a blank journal or performing recovery after a
+        * crash?  For recovery, we need to check in advance whether we
+        * can get read-write access to the device.
+        */
+
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
+               if (sb->s_flags & MS_RDONLY) {
+                       printk(KERN_INFO "EXT4-fs: INFO: recovery "
+                                       "required on readonly filesystem.\n");
+                       if (really_read_only) {
+                               printk(KERN_ERR "EXT4-fs: write access "
+                                       "unavailable, cannot proceed.\n");
+                               return -EROFS;
+                       }
+                       printk (KERN_INFO "EXT4-fs: write access will "
+                                       "be enabled during recovery.\n");
+               }
+       }
+
+       if (journal_inum && journal_dev) {
+               printk(KERN_ERR "EXT4-fs: filesystem has both journal "
+                      "and inode journals!\n");
+               return -EINVAL;
+       }
+
+       if (journal_inum) {
+               if (!(journal = ext4_get_journal(sb, journal_inum)))
+                       return -EINVAL;
+       } else {
+               if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
+                       return -EINVAL;
+       }
+
+       if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
+               err = jbd2_journal_update_format(journal);
+               if (err)  {
+                       printk(KERN_ERR "EXT4-fs: error updating journal.\n");
+                       jbd2_journal_destroy(journal);
+                       return err;
+               }
+       }
+
+       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER))
+               err = jbd2_journal_wipe(journal, !really_read_only);
+       if (!err)
+               err = jbd2_journal_load(journal);
+
+       if (err) {
+               printk(KERN_ERR "EXT4-fs: error loading journal.\n");
+               jbd2_journal_destroy(journal);
+               return err;
+       }
+
+       EXT4_SB(sb)->s_journal = journal;
+       ext4_clear_journal_err(sb, es);
+
+       if (journal_devnum &&
+           journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+               es->s_journal_dev = cpu_to_le32(journal_devnum);
+               sb->s_dirt = 1;
+
+               /* Make sure we flush the recovery flag to disk. */
+               ext4_commit_super(sb, es, 1);
+       }
+
+       return 0;
+}
+
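+/*
+ * Create a fresh journal in the inode named by journal=<inum>; only
+ * possible read-write.  On success the HAS_JOURNAL and RECOVER feature
+ * flags are set and the journal inode number is committed to disk.
+ */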
+static int ext4_create_journal(struct super_block * sb,
+                              struct ext4_super_block * es,
+                              unsigned int journal_inum)
+{
+       journal_t *journal;
+
+       if (sb->s_flags & MS_RDONLY) {
+               printk(KERN_ERR "EXT4-fs: readonly filesystem when trying to "
+                               "create journal.\n");
+               return -EROFS;
+       }
+
+       if (!(journal = ext4_get_journal(sb, journal_inum)))
+               return -EINVAL;
+
+       printk(KERN_INFO "EXT4-fs: creating new journal on inode %u\n",
+              journal_inum);
+
+       if (jbd2_journal_create(journal)) {
+               printk(KERN_ERR "EXT4-fs: error creating journal.\n");
+               jbd2_journal_destroy(journal);
+               return -EIO;
+       }
+
+       EXT4_SB(sb)->s_journal = journal;
+
+       ext4_update_dynamic_rev(sb);
+       EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+       EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL);
+
+       es->s_journal_inum = cpu_to_le32(journal_inum);
+       sb->s_dirt = 1;
+
+       /* Make sure we flush the recovery flag to disk. */
+       ext4_commit_super(sb, es, 1);
+
+       return 0;
+}
+
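+/*
+ * Write the superblock buffer out after refreshing the write time and
+ * the free block/inode counts; if @sync is set, wait for the write to
+ * complete.
+ */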
+static void ext4_commit_super (struct super_block * sb,
+                              struct ext4_super_block * es,
+                              int sync)
+{
+       struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+
+       if (!sbh)
+               return;
+       es->s_wtime = cpu_to_le32(get_seconds());
+       ext4_free_blocks_count_set(es, ext4_count_free_blocks(sb));
+       es->s_free_inodes_count = cpu_to_le32(ext4_count_free_inodes(sb));
+       BUFFER_TRACE(sbh, "marking dirty");
+       mark_buffer_dirty(sbh);
+       if (sync)
+               sync_dirty_buffer(sbh);
+}
+
+
+/*
+ * Have we just finished recovery?  If so, and if we are mounting (or
+ * remounting) the filesystem readonly, then we will end up with a
+ * consistent fs on disk.  Record that fact.
+ */
+static void ext4_mark_recovery_complete(struct super_block * sb,
+                                       struct ext4_super_block * es)
+{
+       journal_t *journal = EXT4_SB(sb)->s_journal;
+
+       jbd2_journal_lock_updates(journal);
+       jbd2_journal_flush(journal);
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) &&
+           sb->s_flags & MS_RDONLY) {
+               EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+               sb->s_dirt = 0;
+               ext4_commit_super(sb, es, 1);
+       }
+       jbd2_journal_unlock_updates(journal);
+}
+
+/*
+ * If we are mounting (or read-write remounting) a filesystem whose journal
+ * has recorded an error from a previous lifetime, move that error to the
+ * main filesystem now.
+ */
+static void ext4_clear_journal_err(struct super_block * sb,
+                                  struct ext4_super_block * es)
+{
+       journal_t *journal;
+       int j_errno;
+       const char *errstr;
+
+       journal = EXT4_SB(sb)->s_journal;
+
+       /*
+        * Now check for any error status which may have been recorded in the
+        * journal by a prior ext4_error() or ext4_abort()
+        */
+
+       j_errno = jbd2_journal_errno(journal);
+       if (j_errno) {
+               char nbuf[16];
+
+               errstr = ext4_decode_error(sb, j_errno, nbuf);
+               ext4_warning(sb, __FUNCTION__, "Filesystem error recorded "
+                            "from previous mount: %s", errstr);
+               ext4_warning(sb, __FUNCTION__, "Marking fs in need of "
+                            "filesystem check.");
+
+               EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+               es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+               ext4_commit_super (sb, es, 1);
+
+               jbd2_journal_clear_err(journal);
+       }
+}
+
+/*
+ * Force the running and committing transactions to commit,
+ * and wait on the commit.
+ */
+int ext4_force_commit(struct super_block *sb)
+{
+       journal_t *journal;
+       int ret;
+
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
+
+       journal = EXT4_SB(sb)->s_journal;
+       sb->s_dirt = 0;
+       ret = ext4_journal_force_commit(journal);
+       return ret;
+}
+
+/*
+ * Ext4 always journals updates to the superblock itself, so we don't
+ * have to propagate any other updates to the superblock on disk at this
+ * point.  Just start an async writeback to get the buffers on their way
+ * to the disk.
+ *
+ * This implicitly triggers the writebehind on sync().
+ */
+
+static void ext4_write_super (struct super_block * sb)
+{
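+       /* write_super is called with s_lock already held; a successful
+        * trylock here means the caller broke that rule, hence BUG(). */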
+       if (mutex_trylock(&sb->s_lock) != 0)
+               BUG();
+       sb->s_dirt = 0;
+}
+
+static int ext4_sync_fs(struct super_block *sb, int wait)
+{
+       tid_t target;
+
+       sb->s_dirt = 0;
+       if (jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, &target)) {
+               if (wait)
+                       jbd2_log_wait_commit(EXT4_SB(sb)->s_journal, target);
+       }
+       return 0;
+}
+
+/*
+ * LVM calls this function before a (read-only) snapshot is created.  This
+ * gives us a chance to flush the journal completely and mark the fs clean.
+ */
+static void ext4_write_super_lockfs(struct super_block *sb)
+{
+       sb->s_dirt = 0;
+
+       if (!(sb->s_flags & MS_RDONLY)) {
+               journal_t *journal = EXT4_SB(sb)->s_journal;
+
+               /* Now we set up the journal barrier. */
+               jbd2_journal_lock_updates(journal);
+               jbd2_journal_flush(journal);
+
+               /* Journal blocked and flushed, clear needs_recovery flag. */
+               EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+               ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
+       }
+}
+
+/*
+ * Called by LVM after the snapshot is done.  We need to reset the RECOVER
+ * flag here, even though the filesystem is not technically dirty yet.
+ */
+static void ext4_unlockfs(struct super_block *sb)
+{
+       if (!(sb->s_flags & MS_RDONLY)) {
+               lock_super(sb);
+               /* Restore the needs_recovery flag before the fs is unlocked. */
+               EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+               ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
+               unlock_super(sb);
+               jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
+       }
+}
+
+static int ext4_remount (struct super_block * sb, int * flags, char * data)
+{
+       struct ext4_super_block * es;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       ext4_fsblk_t n_blocks_count = 0;
+       unsigned long old_sb_flags;
+       struct ext4_mount_options old_opts;
+       int err;
+#ifdef CONFIG_QUOTA
+       int i;
+#endif
+
+       /* Store the original options */
+       old_sb_flags = sb->s_flags;
+       old_opts.s_mount_opt = sbi->s_mount_opt;
+       old_opts.s_resuid = sbi->s_resuid;
+       old_opts.s_resgid = sbi->s_resgid;
+       old_opts.s_commit_interval = sbi->s_commit_interval;
+#ifdef CONFIG_QUOTA
+       old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
+       for (i = 0; i < MAXQUOTAS; i++)
+               old_opts.s_qf_names[i] = sbi->s_qf_names[i];
+#endif
+
+       /*
+        * Allow the "check" option to be passed as a remount option.
+        */
+       if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
+               err = -EINVAL;
+               goto restore_opts;
+       }
+
+       if (sbi->s_mount_opt & EXT4_MOUNT_ABORT)
+               ext4_abort(sb, __FUNCTION__, "Abort forced by user");
+
+       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+               ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+
+       es = sbi->s_es;
+
+       ext4_init_journal_params(sb, sbi->s_journal);
+
+       if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
+               n_blocks_count > ext4_blocks_count(es)) {
+               if (sbi->s_mount_opt & EXT4_MOUNT_ABORT) {
+                       err = -EROFS;
+                       goto restore_opts;
+               }
+
+               if (*flags & MS_RDONLY) {
+                       /*
+                        * First of all, the unconditional stuff we have to do
+                        * to disable replay of the journal when we next remount
+                        */
+                       sb->s_flags |= MS_RDONLY;
+
+                       /*
+                        * OK, test if we are remounting a valid rw partition
+                        * readonly, and if so set the rdonly flag and then
+                        * mark the partition as valid again.
+                        */
+                       if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
+                           (sbi->s_mount_state & EXT4_VALID_FS))
+                               es->s_state = cpu_to_le16(sbi->s_mount_state);
+
+                       ext4_mark_recovery_complete(sb, es);
+               } else {
+                       __le32 ret;
+                       if ((ret = EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       ~EXT4_FEATURE_RO_COMPAT_SUPP))) {
+                               printk(KERN_WARNING "EXT4-fs: %s: couldn't "
+                                      "remount RDWR because of unsupported "
+                                      "optional features (%x).\n",
+                                      sb->s_id, le32_to_cpu(ret));
+                               err = -EROFS;
+                               goto restore_opts;
+                       }
+                       /*
+                        * Mounting a read-only partition read-write, so reread
+                        * and store the current valid flag.  (It may have
+                        * been changed by e2fsck since we originally mounted
+                        * the partition.)
+                        */
+                       ext4_clear_journal_err(sb, es);
+                       sbi->s_mount_state = le16_to_cpu(es->s_state);
+                       if ((err = ext4_group_extend(sb, es, n_blocks_count)))
+                               goto restore_opts;
+                       if (!ext4_setup_super (sb, es, 0))
+                               sb->s_flags &= ~MS_RDONLY;
+               }
+       }
+#ifdef CONFIG_QUOTA
+       /* Release old quota file names */
+       for (i = 0; i < MAXQUOTAS; i++)
+               if (old_opts.s_qf_names[i] &&
+                   old_opts.s_qf_names[i] != sbi->s_qf_names[i])
+                       kfree(old_opts.s_qf_names[i]);
+#endif
+       return 0;
+restore_opts:
+       sb->s_flags = old_sb_flags;
+       sbi->s_mount_opt = old_opts.s_mount_opt;
+       sbi->s_resuid = old_opts.s_resuid;
+       sbi->s_resgid = old_opts.s_resgid;
+       sbi->s_commit_interval = old_opts.s_commit_interval;
+#ifdef CONFIG_QUOTA
+       sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
+       for (i = 0; i < MAXQUOTAS; i++) {
+               if (sbi->s_qf_names[i] &&
+                   old_opts.s_qf_names[i] != sbi->s_qf_names[i])
+                       kfree(sbi->s_qf_names[i]);
+               sbi->s_qf_names[i] = old_opts.s_qf_names[i];
+       }
+#endif
+       return err;
+}
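The remount path above is a save/try/restore pattern: every mutable option is snapshotted first, and any failure jumps to restore_opts so a rejected remount leaves the mount exactly as it was. A distilled sketch of that shape, assuming hypothetical parse_into() and apply_opts() helpers in place of parse_options() and the ro/rw transition:

    struct opts { unsigned long mount_opt; unsigned int resuid, resgid; };

    /* Hypothetical stand-ins: a parser that may partially fill *o, and
     * an application step that may fail. */
    static int parse_into(struct opts *o, const char *data)
    { (void)o; (void)data; return 0; }
    static int apply_opts(const struct opts *o)
    { (void)o; return 0; }

    static int remount_like(struct opts *cur, const char *data)
    {
            struct opts saved = *cur;       /* store the original options */
            int err;

            err = parse_into(cur, data);    /* may partially modify *cur */
            if (err)
                    goto restore_opts;
            err = apply_opts(cur);          /* attempt the transition */
            if (err)
                    goto restore_opts;
            return 0;                       /* success: new options stick */
    restore_opts:
            *cur = saved;                   /* failure: mount is unchanged */
            return err;
    }

    int main(void)
    {
            struct opts o = { 0, 0, 0 };
            return remount_like(&o, "noacl");
    }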
+
+static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
+{
+       struct super_block *sb = dentry->d_sb;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       ext4_fsblk_t overhead;
+       int i;
+
+       if (test_opt (sb, MINIX_DF))
+               overhead = 0;
+       else {
+               unsigned long ngroups;
+               ngroups = EXT4_SB(sb)->s_groups_count;
+               smp_rmb();
+
+               /*
+                * Compute the overhead (FS structures)
+                */
+
+               /*
+                * All of the blocks before first_data_block are
+                * overhead
+                */
+               overhead = le32_to_cpu(es->s_first_data_block);
+
+               /*
+                * Add the overhead attributed to the superblock and
+                * block group descriptors.  If the sparse superblocks
+                * feature is turned on, then not all groups have this.
+                */
+               for (i = 0; i < ngroups; i++) {
+                       overhead += ext4_bg_has_super(sb, i) +
+                               ext4_bg_num_gdb(sb, i);
+                       cond_resched();
+               }
+
+               /*
+                * Every block group has an inode bitmap, a block
+                * bitmap, and an inode table.
+                */
+               overhead += (ngroups * (2 + EXT4_SB(sb)->s_itb_per_group));
+       }
+
+       buf->f_type = EXT4_SUPER_MAGIC;
+       buf->f_bsize = sb->s_blocksize;
+       buf->f_blocks = ext4_blocks_count(es) - overhead;
+       buf->f_bfree = percpu_counter_sum(&sbi->s_freeblocks_counter);
+       buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
+       if (buf->f_bfree < ext4_r_blocks_count(es))
+               buf->f_bavail = 0;
+       buf->f_files = le32_to_cpu(es->s_inodes_count);
+       buf->f_ffree = percpu_counter_sum(&sbi->s_freeinodes_counter);
+       buf->f_namelen = EXT4_NAME_LEN;
+       return 0;
+}
+
+/* Helper function for writing quotas on sync - we need to start a transaction
+ * before the quota file is locked for write. Otherwise the following deadlocks
+ * are possible:
+ * Process 1                         Process 2
+ * ext4_create()                     quota_sync()
+ *   jbd2_journal_start()                   write_dquot()
+ *   DQUOT_INIT()                        down(dqio_mutex)
+ *     down(dqio_mutex)                    jbd2_journal_start()
+ *
+ */
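The diagram is the classic ABBA deadlock: one path holds a running transaction and then wants dqio_mutex, the other holds dqio_mutex and then wants to start a transaction. The ext4_dquot_* helpers below impose a single global order by always starting the transaction first. A minimal userspace sketch of that rule, with pthread mutexes as stand-ins for the two resources (names are illustrative):

    #include <pthread.h>

    /* Stand-ins for "a running transaction" and dqio_mutex. */
    static pthread_mutex_t journal = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t dqio    = PTHREAD_MUTEX_INITIALIZER;

    /* Single global order: transaction first, then dqio, on every path -
     * mirroring how each ext4_dquot_* helper calls ext4_journal_start()
     * before any quota call that may take dqio_mutex. */
    static void quota_op(void)
    {
            pthread_mutex_lock(&journal);   /* "jbd2_journal_start()" */
            pthread_mutex_lock(&dqio);      /* "down(dqio_mutex)" */
            /* ... touch the quota file ... */
            pthread_mutex_unlock(&dqio);
            pthread_mutex_unlock(&journal); /* "ext4_journal_stop()" */
    }

    int main(void)
    {
            quota_op();
            return 0;
    }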
+
+#ifdef CONFIG_QUOTA
+
+static inline struct inode *dquot_to_inode(struct dquot *dquot)
+{
+       return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
+}
+
+static int ext4_dquot_initialize(struct inode *inode, int type)
+{
+       handle_t *handle;
+       int ret, err;
+
+       /* We may create a quota structure, so we need to reserve enough blocks */
+       handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       ret = dquot_initialize(inode, type);
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+}
+
+static int ext4_dquot_drop(struct inode *inode)
+{
+       handle_t *handle;
+       int ret, err;
+
+       /* We may delete a quota structure, so we need to reserve enough blocks */
+       handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       ret = dquot_drop(inode);
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+}
+
+static int ext4_write_dquot(struct dquot *dquot)
+{
+       int ret, err;
+       handle_t *handle;
+       struct inode *inode;
+
+       inode = dquot_to_inode(dquot);
+       handle = ext4_journal_start(inode,
+                                       EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       ret = dquot_commit(dquot);
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+}
+
+static int ext4_acquire_dquot(struct dquot *dquot)
+{
+       int ret, err;
+       handle_t *handle;
+
+       handle = ext4_journal_start(dquot_to_inode(dquot),
+                                       EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       ret = dquot_acquire(dquot);
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+}
+
+static int ext4_release_dquot(struct dquot *dquot)
+{
+       int ret, err;
+       handle_t *handle;
+
+       handle = ext4_journal_start(dquot_to_inode(dquot),
+                                       EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       ret = dquot_release(dquot);
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+}
+
+static int ext4_mark_dquot_dirty(struct dquot *dquot)
+{
+       /* Are we journalling quotas? */
+       if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
+           EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
+               dquot_mark_dquot_dirty(dquot);
+               return ext4_write_dquot(dquot);
+       } else {
+               return dquot_mark_dquot_dirty(dquot);
+       }
+}
+
+static int ext4_write_info(struct super_block *sb, int type)
+{
+       int ret, err;
+       handle_t *handle;
+
+       /* Data block + inode block */
+       handle = ext4_journal_start(sb->s_root->d_inode, 2);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       ret = dquot_commit_info(sb, type);
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+       return ret;
+}
+
+/*
+ * Turn on quotas during mount time - we need to find
+ * the quota file and such...
+ */
+static int ext4_quota_on_mount(struct super_block *sb, int type)
+{
+       return vfs_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
+                       EXT4_SB(sb)->s_jquota_fmt, type);
+}
+
+/*
+ * Standard function to be called on quota_on
+ */
+static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+                        char *path)
+{
+       int err;
+       struct nameidata nd;
+
+       if (!test_opt(sb, QUOTA))
+               return -EINVAL;
+       /* Not journalling quota? */
+       if (!EXT4_SB(sb)->s_qf_names[USRQUOTA] &&
+           !EXT4_SB(sb)->s_qf_names[GRPQUOTA])
+               return vfs_quota_on(sb, type, format_id, path);
+       err = path_lookup(path, LOOKUP_FOLLOW, &nd);
+       if (err)
+               return err;
+       /* Quotafile not on the same filesystem? */
+       if (nd.mnt->mnt_sb != sb) {
+               path_release(&nd);
+               return -EXDEV;
+       }
+       /* Quotafile not of fs root? */
+       if (nd.dentry->d_parent->d_inode != sb->s_root->d_inode)
+               printk(KERN_WARNING
+                       "EXT4-fs: Quota file not on filesystem root. "
+                       "Journalled quota will not work.\n");
+       path_release(&nd);
+       return vfs_quota_on(sb, type, format_id, path);
+}
+
+/* Read data from the quota file - bypass the page cache and such because we
+ * cannot afford acquiring the locks there. As quota files are never truncated
+ * and the quota code itself serializes the operations (and no one else should
+ * touch the files), we don't have to be afraid of races */
+static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
+                              size_t len, loff_t off)
+{
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       sector_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
+       int err = 0;
+       int offset = off & (sb->s_blocksize - 1);
+       int tocopy;
+       size_t toread;
+       struct buffer_head *bh;
+       loff_t i_size = i_size_read(inode);
+
+       if (off > i_size)
+               return 0;
+       if (off+len > i_size)
+               len = i_size-off;
+       toread = len;
+       while (toread > 0) {
+               tocopy = sb->s_blocksize - offset < toread ?
+                               sb->s_blocksize - offset : toread;
+               bh = ext4_bread(NULL, inode, blk, 0, &err);
+               if (err)
+                       return err;
+               if (!bh)        /* A hole? */
+                       memset(data, 0, tocopy);
+               else
+                       memcpy(data, bh->b_data+offset, tocopy);
+               brelse(bh);
+               offset = 0;
+               toread -= tocopy;
+               data += tocopy;
+               blk++;
+       }
+       return len;
+}
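Both quota I/O helpers split a byte range into pieces that never cross a block boundary: the first piece runs from the in-block offset to the end of the block, and every later piece starts at offset 0. A standalone sketch of the same chunking arithmetic, assuming an example BLOCKSIZE:

    #include <stdio.h>

    #define BLOCKSIZE 4096                  /* example block size */

    int main(void)
    {
            long long off = 4090, len = 20; /* example request */
            long long blk = off / BLOCKSIZE;
            long long remaining = len;
            int offset = off % BLOCKSIZE;

            while (remaining > 0) {
                    int tocopy = BLOCKSIZE - offset < remaining ?
                                    BLOCKSIZE - offset : (int)remaining;
                    printf("block %lld: offset %d, copy %d bytes\n",
                           blk, offset, tocopy);
                    offset = 0;             /* later pieces start at 0 */
                    remaining -= tocopy;
                    blk++;
            }
            return 0;
    }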
+
+/* Write to quotafile (we know the transaction is already started and has
+ * enough credits) */
+static ssize_t ext4_quota_write(struct super_block *sb, int type,
+                               const char *data, size_t len, loff_t off)
+{
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       sector_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
+       int err = 0;
+       int offset = off & (sb->s_blocksize - 1);
+       int tocopy;
+       int journal_quota = EXT4_SB(sb)->s_qf_names[type] != NULL;
+       size_t towrite = len;
+       struct buffer_head *bh;
+       handle_t *handle = journal_current_handle();
+
+       mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+       while (towrite > 0) {
+               tocopy = sb->s_blocksize - offset < towrite ?
+                               sb->s_blocksize - offset : towrite;
+               bh = ext4_bread(handle, inode, blk, 1, &err);
+               if (!bh)
+                       goto out;
+               if (journal_quota) {
+                       err = ext4_journal_get_write_access(handle, bh);
+                       if (err) {
+                               brelse(bh);
+                               goto out;
+                       }
+               }
+               lock_buffer(bh);
+               memcpy(bh->b_data+offset, data, tocopy);
+               flush_dcache_page(bh->b_page);
+               unlock_buffer(bh);
+               if (journal_quota)
+                       err = ext4_journal_dirty_metadata(handle, bh);
+               else {
+                       /* Always do at least ordered writes for quotas */
+                       err = ext4_journal_dirty_data(handle, bh);
+                       mark_buffer_dirty(bh);
+               }
+               brelse(bh);
+               if (err)
+                       goto out;
+               offset = 0;
+               towrite -= tocopy;
+               data += tocopy;
+               blk++;
+       }
+out:
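+       /* Nothing was copied - report the error rather than a short write. */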
+       if (len == towrite)
+               return err;
+       if (inode->i_size < off+len-towrite) {
+               i_size_write(inode, off+len-towrite);
+               EXT4_I(inode)->i_disksize = inode->i_size;
+       }
+       inode->i_version++;
+       inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       ext4_mark_inode_dirty(handle, inode);
+       mutex_unlock(&inode->i_mutex);
+       return len - towrite;
+}
+
+#endif
+
+static int ext4_get_sb(struct file_system_type *fs_type,
+       int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+{
+       return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super, mnt);
+}
+
+static struct file_system_type ext4dev_fs_type = {
+       .owner          = THIS_MODULE,
+       .name           = "ext4dev",
+       .get_sb         = ext4_get_sb,
+       .kill_sb        = kill_block_super,
+       .fs_flags       = FS_REQUIRES_DEV,
+};
+
+static int __init init_ext4_fs(void)
+{
+       int err = init_ext4_xattr();
+       if (err)
+               return err;
+       err = init_inodecache();
+       if (err)
+               goto out1;
+       err = register_filesystem(&ext4dev_fs_type);
+       if (err)
+               goto out;
+       return 0;
+out:
+       destroy_inodecache();
+out1:
+       exit_ext4_xattr();
+       return err;
+}
+
+static void __exit exit_ext4_fs(void)
+{
+       unregister_filesystem(&ext4dev_fs_type);
+       destroy_inodecache();
+       exit_ext4_xattr();
+}
+
+MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+MODULE_DESCRIPTION("Fourth Extended Filesystem with extents");
+MODULE_LICENSE("GPL");
+module_init(init_ext4_fs)
+module_exit(exit_ext4_fs)
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
new file mode 100644 (file)
index 0000000..fcf5272
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ *  linux/fs/ext4/symlink.c
+ *
+ * Only fast symlinks left here - the rest is done by generic code. AV, 1999
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/fs/minix/symlink.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  ext4 symlink handling code
+ */
+
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/namei.h>
+#include "xattr.h"
+
+static void * ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+       struct ext4_inode_info *ei = EXT4_I(dentry->d_inode);
+       nd_set_link(nd, (char *)ei->i_data);
+       return NULL;
+}
+
+struct inode_operations ext4_symlink_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = page_follow_link_light,
+       .put_link       = page_put_link,
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
+       .listxattr      = ext4_listxattr,
+       .removexattr    = generic_removexattr,
+#endif
+};
+
+struct inode_operations ext4_fast_symlink_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = ext4_follow_link,
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
+       .listxattr      = ext4_listxattr,
+       .removexattr    = generic_removexattr,
+#endif
+};
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
new file mode 100644 (file)
index 0000000..63233cd
--- /dev/null
@@ -0,0 +1,1317 @@
+/*
+ * linux/fs/ext4/xattr.c
+ *
+ * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
+ *
+ * Fix by Harrison Xing <harrison@mountainviewdata.com>.
+ * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
+ * Extended attributes for symlinks and special files added per
+ *  suggestion of Luka Renko <luka.renko@hermes.si>.
+ * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
+ *  Red Hat Inc.
+ * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
+ *  and Andreas Gruenbacher <agruen@suse.de>.
+ */
+
+/*
+ * Extended attributes are stored directly in inodes (on file systems with
+ * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
+ * field contains the block number if an inode uses an additional block. All
+ * attributes must fit in the inode and one additional block. Blocks that
+ * contain the identical set of attributes may be shared among several inodes.
+ * Identical blocks are detected by keeping a cache of blocks that have
+ * recently been accessed.
+ *
+ * The attributes in inodes and on blocks have a different header; the entries
+ * are stored in the same format:
+ *
+ *   +------------------+
+ *   | header           |
+ *   | entry 1          | |
+ *   | entry 2          | | growing downwards
+ *   | entry 3          | v
+ *   | four null bytes  |
+ *   | . . .            |
+ *   | value 1          | ^
+ *   | value 3          | | growing upwards
+ *   | value 2          | |
+ *   +------------------+
+ *
+ * The header is followed by multiple entry descriptors. In disk blocks, the
+ * entry descriptors are kept sorted. In inodes, they are unsorted. The
+ * attribute values are aligned to the end of the block in no specific order.
+ *
+ * Locking strategy
+ * ----------------
+ * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
+ * EA blocks are only changed if they are exclusive to an inode, so
+ * holding xattr_sem also means that nothing but the EA block's reference
+ * count can change. Multiple writers to the same block are synchronized
+ * by the buffer lock.
+ */
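The layout implies a simple walk: descriptors start right after the header and grow downward, each padded to a 4-byte boundary; values pack upward from the end of the buffer; four zero bytes terminate the descriptor list. A simplified standalone model of that walk - the real struct ext4_xattr_entry has more fields and its own padding macros, so treat this as a sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified entry descriptor; as on disk, the first four bytes
     * double as the end-of-list marker when all zero. */
    struct xent {
            uint8_t  name_len;
            uint8_t  name_index;
            uint16_t value_offs;    /* value offset from start of buffer */
            uint32_t value_size;
            char     name[];        /* name_len bytes, padded to 4 */
    };

    #define XNEXT(e) ((struct xent *)((char *)(e) + \
                      ((sizeof(struct xent) + (e)->name_len + 3) & ~3UL)))
    #define IS_LAST(e) (*(uint32_t *)(e) == 0)

    static void list_names(void *buf)
    {
            struct xent *e = buf;

            for (; !IS_LAST(e); e = XNEXT(e))   /* descriptors grow down */
                    printf("%.*s -> %u bytes at offset %u\n",
                           e->name_len, e->name, e->value_size,
                           (unsigned)e->value_offs);
    }

    int main(void)
    {
            unsigned char buf[64] = {0};        /* zeroed = terminated */
            struct xent *e = (struct xent *)buf;

            e->name_len = 1;
            e->name_index = 1;
            e->value_offs = 60;                 /* values grow up */
            e->value_size = 4;
            e->name[0] = 'x';
            list_names(buf);
            return 0;
    }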
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/mbcache.h>
+#include <linux/quotaops.h>
+#include <linux/rwsem.h>
+#include "xattr.h"
+#include "acl.h"
+
+#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
+#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
+#define BFIRST(bh) ENTRY(BHDR(bh)+1)
+#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
+
+#define IHDR(inode, raw_inode) \
+       ((struct ext4_xattr_ibody_header *) \
+               ((void *)raw_inode + \
+                EXT4_GOOD_OLD_INODE_SIZE + \
+                EXT4_I(inode)->i_extra_isize))
+#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
+
+#ifdef EXT4_XATTR_DEBUG
+# define ea_idebug(inode, f...) do { \
+               printk(KERN_DEBUG "inode %s:%lu: ", \
+                       inode->i_sb->s_id, inode->i_ino); \
+               printk(f); \
+               printk("\n"); \
+       } while (0)
+# define ea_bdebug(bh, f...) do { \
+               char b[BDEVNAME_SIZE]; \
+               printk(KERN_DEBUG "block %s:%lu: ", \
+                       bdevname(bh->b_bdev, b), \
+                       (unsigned long) bh->b_blocknr); \
+               printk(f); \
+               printk("\n"); \
+       } while (0)
+#else
+# define ea_idebug(f...)
+# define ea_bdebug(f...)
+#endif
+
+static void ext4_xattr_cache_insert(struct buffer_head *);
+static struct buffer_head *ext4_xattr_cache_find(struct inode *,
+                                                struct ext4_xattr_header *,
+                                                struct mb_cache_entry **);
+static void ext4_xattr_rehash(struct ext4_xattr_header *,
+                             struct ext4_xattr_entry *);
+
+static struct mb_cache *ext4_xattr_cache;
+
+static struct xattr_handler *ext4_xattr_handler_map[] = {
+       [EXT4_XATTR_INDEX_USER]              = &ext4_xattr_user_handler,
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+       [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext4_xattr_acl_access_handler,
+       [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext4_xattr_acl_default_handler,
+#endif
+       [EXT4_XATTR_INDEX_TRUSTED]           = &ext4_xattr_trusted_handler,
+#ifdef CONFIG_EXT4DEV_FS_SECURITY
+       [EXT4_XATTR_INDEX_SECURITY]          = &ext4_xattr_security_handler,
+#endif
+};
+
+struct xattr_handler *ext4_xattr_handlers[] = {
+       &ext4_xattr_user_handler,
+       &ext4_xattr_trusted_handler,
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+       &ext4_xattr_acl_access_handler,
+       &ext4_xattr_acl_default_handler,
+#endif
+#ifdef CONFIG_EXT4DEV_FS_SECURITY
+       &ext4_xattr_security_handler,
+#endif
+       NULL
+};
+
+static inline struct xattr_handler *
+ext4_xattr_handler(int name_index)
+{
+       struct xattr_handler *handler = NULL;
+
+       if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
+               handler = ext4_xattr_handler_map[name_index];
+       return handler;
+}
+
+/*
+ * Inode operation listxattr()
+ *
+ * dentry->d_inode->i_mutex: don't care
+ */
+ssize_t
+ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+       return ext4_xattr_list(dentry->d_inode, buffer, size);
+}
+
+static int
+ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
+{
+       while (!IS_LAST_ENTRY(entry)) {
+               struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
+               if ((void *)next >= end)
+                       return -EIO;
+               entry = next;
+       }
+       return 0;
+}
+
+static inline int
+ext4_xattr_check_block(struct buffer_head *bh)
+{
+       int error;
+
+       if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
+           BHDR(bh)->h_blocks != cpu_to_le32(1))
+               return -EIO;
+       error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
+       return error;
+}
+
+static inline int
+ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
+{
+       size_t value_size = le32_to_cpu(entry->e_value_size);
+
+       if (entry->e_value_block != 0 || value_size > size ||
+           le16_to_cpu(entry->e_value_offs) + value_size > size)
+               return -EIO;
+       return 0;
+}
+
+static int
+ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
+                     const char *name, size_t size, int sorted)
+{
+       struct ext4_xattr_entry *entry;
+       size_t name_len;
+       int cmp = 1;
+
+       if (name == NULL)
+               return -EINVAL;
+       name_len = strlen(name);
+       entry = *pentry;
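+       /* In a sorted (block) list we can stop at the first entry that does
+        * not compare below the key; an unsorted (in-inode) list must be
+        * scanned for an exact match. */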
+       for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+               cmp = name_index - entry->e_name_index;
+               if (!cmp)
+                       cmp = name_len - entry->e_name_len;
+               if (!cmp)
+                       cmp = memcmp(name, entry->e_name, name_len);
+               if (cmp <= 0 && (sorted || cmp == 0))
+                       break;
+       }
+       *pentry = entry;
+       if (!cmp && ext4_xattr_check_entry(entry, size))
+               return -EIO;
+       return cmp ? -ENODATA : 0;
+}
+
+static int
+ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+                    void *buffer, size_t buffer_size)
+{
+       struct buffer_head *bh = NULL;
+       struct ext4_xattr_entry *entry;
+       size_t size;
+       int error;
+
+       ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
+                 name_index, name, buffer, (long)buffer_size);
+
+       error = -ENODATA;
+       if (!EXT4_I(inode)->i_file_acl)
+               goto cleanup;
+       ea_idebug(inode, "reading block %u", EXT4_I(inode)->i_file_acl);
+       bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+       if (!bh)
+               goto cleanup;
+       ea_bdebug(bh, "b_count=%d, refcount=%d",
+               atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+       if (ext4_xattr_check_block(bh)) {
+bad_block:     ext4_error(inode->i_sb, __FUNCTION__,
+                          "inode %lu: bad block %llu", inode->i_ino,
+                          EXT4_I(inode)->i_file_acl);
+               error = -EIO;
+               goto cleanup;
+       }
+       ext4_xattr_cache_insert(bh);
+       entry = BFIRST(bh);
+       error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
+       if (error == -EIO)
+               goto bad_block;
+       if (error)
+               goto cleanup;
+       size = le32_to_cpu(entry->e_value_size);
+       if (buffer) {
+               error = -ERANGE;
+               if (size > buffer_size)
+                       goto cleanup;
+               memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
+                      size);
+       }
+       error = size;
+
+cleanup:
+       brelse(bh);
+       return error;
+}
+
+static int
+ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+                    void *buffer, size_t buffer_size)
+{
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_xattr_entry *entry;
+       struct ext4_inode *raw_inode;
+       struct ext4_iloc iloc;
+       size_t size;
+       void *end;
+       int error;
+
+       if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
+               return -ENODATA;
+       error = ext4_get_inode_loc(inode, &iloc);
+       if (error)
+               return error;
+       raw_inode = ext4_raw_inode(&iloc);
+       header = IHDR(inode, raw_inode);
+       entry = IFIRST(header);
+       end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+       error = ext4_xattr_check_names(entry, end);
+       if (error)
+               goto cleanup;
+       error = ext4_xattr_find_entry(&entry, name_index, name,
+                                     end - (void *)entry, 0);
+       if (error)
+               goto cleanup;
+       size = le32_to_cpu(entry->e_value_size);
+       if (buffer) {
+               error = -ERANGE;
+               if (size > buffer_size)
+                       goto cleanup;
+               memcpy(buffer, (void *)IFIRST(header) +
+                      le16_to_cpu(entry->e_value_offs), size);
+       }
+       error = size;
+
+cleanup:
+       brelse(iloc.bh);
+       return error;
+}
+
+/*
+ * ext4_xattr_get()
+ *
+ * Copy an extended attribute into the buffer provided.  If buffer is
+ * NULL, compute the size of the buffer required instead.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+int
+ext4_xattr_get(struct inode *inode, int name_index, const char *name,
+              void *buffer, size_t buffer_size)
+{
+       int error;
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
+                                    buffer_size);
+       if (error == -ENODATA)
+               error = ext4_xattr_block_get(inode, name_index, name, buffer,
+                                            buffer_size);
+       up_read(&EXT4_I(inode)->xattr_sem);
+       return error;
+}
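The NULL-buffer convention implemented here is what the getxattr(2) syscall exposes to userspace: call once with size 0 to learn the value length, then again with a buffer. A typical usage sketch, assuming an example path and attribute name:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/xattr.h>

    int main(void)
    {
            /* First call sizes the value, second call fetches it. */
            ssize_t size = getxattr("file.txt", "user.comment", NULL, 0);
            if (size < 0) {
                    perror("getxattr");
                    return 1;
            }
            char *value = malloc(size);
            size = getxattr("file.txt", "user.comment", value, size);
            if (size < 0) {
                    perror("getxattr");
                    return 1;
            }
            printf("user.comment = %.*s\n", (int)size, value);
            free(value);
            return 0;
    }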
+
+static int
+ext4_xattr_list_entries(struct inode *inode, struct ext4_xattr_entry *entry,
+                       char *buffer, size_t buffer_size)
+{
+       size_t rest = buffer_size;
+
+       for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+               struct xattr_handler *handler =
+                       ext4_xattr_handler(entry->e_name_index);
+
+               if (handler) {
+                       size_t size = handler->list(inode, buffer, rest,
+                                                   entry->e_name,
+                                                   entry->e_name_len);
+                       if (buffer) {
+                               if (size > rest)
+                                       return -ERANGE;
+                               buffer += size;
+                       }
+                       rest -= size;
+               }
+       }
+       return buffer_size - rest;
+}
+
+static int
+ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
+{
+       struct buffer_head *bh = NULL;
+       int error;
+
+       ea_idebug(inode, "buffer=%p, buffer_size=%ld",
+                 buffer, (long)buffer_size);
+
+       error = 0;
+       if (!EXT4_I(inode)->i_file_acl)
+               goto cleanup;
+       ea_idebug(inode, "reading block %u", EXT4_I(inode)->i_file_acl);
+       bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+       error = -EIO;
+       if (!bh)
+               goto cleanup;
+       ea_bdebug(bh, "b_count=%d, refcount=%d",
+               atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+       if (ext4_xattr_check_block(bh)) {
+               ext4_error(inode->i_sb, __FUNCTION__,
+                          "inode %lu: bad block %llu", inode->i_ino,
+                          EXT4_I(inode)->i_file_acl);
+               error = -EIO;
+               goto cleanup;
+       }
+       ext4_xattr_cache_insert(bh);
+       error = ext4_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);
+
+cleanup:
+       brelse(bh);
+
+       return error;
+}
+
+static int
+ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
+{
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_inode *raw_inode;
+       struct ext4_iloc iloc;
+       void *end;
+       int error;
+
+       if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
+               return 0;
+       error = ext4_get_inode_loc(inode, &iloc);
+       if (error)
+               return error;
+       raw_inode = ext4_raw_inode(&iloc);
+       header = IHDR(inode, raw_inode);
+       end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+       error = ext4_xattr_check_names(IFIRST(header), end);
+       if (error)
+               goto cleanup;
+       error = ext4_xattr_list_entries(inode, IFIRST(header),
+                                       buffer, buffer_size);
+
+cleanup:
+       brelse(iloc.bh);
+       return error;
+}
+
+/*
+ * ext4_xattr_list()
+ *
+ * Copy a list of attribute names into the buffer provided.  If buffer
+ * is NULL, compute the size of the buffer required instead.
+ *
+ * Returns a negative error number on failure, or the number of bytes
+ * used / required on success.
+ */
+int
+ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
+{
+       int i_error, b_error;
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       i_error = ext4_xattr_ibody_list(inode, buffer, buffer_size);
+       if (i_error < 0) {
+               b_error = 0;
+       } else {
+               if (buffer) {
+                       buffer += i_error;
+                       buffer_size -= i_error;
+               }
+               b_error = ext4_xattr_block_list(inode, buffer, buffer_size);
+               if (b_error < 0)
+                       i_error = 0;
+       }
+       up_read(&EXT4_I(inode)->xattr_sem);
+       return i_error + b_error;
+}
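ext4_xattr_list() concatenates the in-inode and block-resident names into one buffer of NUL-terminated strings, the format listxattr(2) returns. A sketch of the usual size-query-then-walk pattern, assuming an example path:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
            ssize_t len = listxattr("file.txt", NULL, 0);
            if (len <= 0)
                    return 1;               /* error, or no attributes */

            char *names = malloc(len);
            len = listxattr("file.txt", names, len);
            if (len < 0) {
                    perror("listxattr");
                    return 1;
            }
            /* The buffer holds NUL-terminated names back to back. */
            for (char *p = names; p < names + len; p += strlen(p) + 1)
                    printf("xattr: %s\n", p);
            free(names);
            return 0;
    }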
+
+/*
+ * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
+ * not set, set it.
+ */
+static void ext4_xattr_update_super_block(handle_t *handle,
+                                         struct super_block *sb)
+{
+       if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
+               return;
+
+       lock_super(sb);
+       if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
+               EXT4_SB(sb)->s_es->s_feature_compat |=
+                       cpu_to_le32(EXT4_FEATURE_COMPAT_EXT_ATTR);
+               sb->s_dirt = 1;
+               ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
+       }
+       unlock_super(sb);
+}
+
+/*
+ * Release the xattr block BH: If the reference count is > 1, decrement
+ * it; otherwise free the block.
+ */
+static void
+ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+                        struct buffer_head *bh)
+{
+       struct mb_cache_entry *ce = NULL;
+
+       ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
+       if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
+               ea_bdebug(bh, "refcount now=0; freeing");
+               if (ce)
+                       mb_cache_entry_free(ce);
+               ext4_free_blocks(handle, inode, bh->b_blocknr, 1);
+               get_bh(bh);
+               ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
+       } else {
+               if (ext4_journal_get_write_access(handle, bh) == 0) {
+                       lock_buffer(bh);
+                       BHDR(bh)->h_refcount = cpu_to_le32(
+                               le32_to_cpu(BHDR(bh)->h_refcount) - 1);
+                       ext4_journal_dirty_metadata(handle, bh);
+                       if (IS_SYNC(inode))
+                               handle->h_sync = 1;
+                       DQUOT_FREE_BLOCK(inode, 1);
+                       unlock_buffer(bh);
+                       ea_bdebug(bh, "refcount now=%d; releasing",
+                                 le32_to_cpu(BHDR(bh)->h_refcount));
+               }
+               if (ce)
+                       mb_cache_entry_release(ce);
+       }
+}
+
+struct ext4_xattr_info {
+       int name_index;
+       const char *name;
+       const void *value;
+       size_t value_len;
+};
+
+struct ext4_xattr_search {
+       struct ext4_xattr_entry *first;
+       void *base;
+       void *end;
+       struct ext4_xattr_entry *here;
+       int not_found;
+};
+
+static int
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+{
+       struct ext4_xattr_entry *last;
+       size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
+
+       /* Compute min_offs and last. */
+       last = s->first;
+       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+               if (!last->e_value_block && last->e_value_size) {
+                       size_t offs = le16_to_cpu(last->e_value_offs);
+                       if (offs < min_offs)
+                               min_offs = offs;
+               }
+       }
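+       /* Free space is the gap between the end of the entry descriptors
+        * and the lowest value offset, minus the 4-byte end marker. */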
+       free = min_offs - ((void *)last - s->base) - sizeof(__u32);
+       if (!s->not_found) {
+               if (!s->here->e_value_block && s->here->e_value_size) {
+                       size_t size = le32_to_cpu(s->here->e_value_size);
+                       free += EXT4_XATTR_SIZE(size);
+               }
+               free += EXT4_XATTR_LEN(name_len);
+       }
+       if (i->value) {
+               if (free < EXT4_XATTR_SIZE(i->value_len) ||
+                   free < EXT4_XATTR_LEN(name_len) +
+                          EXT4_XATTR_SIZE(i->value_len))
+                       return -ENOSPC;
+       }
+
+       if (i->value && s->not_found) {
+               /* Insert the new name. */
+               size_t size = EXT4_XATTR_LEN(name_len);
+               size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
+               memmove((void *)s->here + size, s->here, rest);
+               memset(s->here, 0, size);
+               s->here->e_name_index = i->name_index;
+               s->here->e_name_len = name_len;
+               memcpy(s->here->e_name, i->name, name_len);
+       } else {
+               if (!s->here->e_value_block && s->here->e_value_size) {
+                       void *first_val = s->base + min_offs;
+                       size_t offs = le16_to_cpu(s->here->e_value_offs);
+                       void *val = s->base + offs;
+                       size_t size = EXT4_XATTR_SIZE(
+                               le32_to_cpu(s->here->e_value_size));
+
+                       if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
+                               /* The old and the new value have the same
+                                  size. Just replace. */
+                               s->here->e_value_size =
+                                       cpu_to_le32(i->value_len);
+                               memset(val + size - EXT4_XATTR_PAD, 0,
+                                      EXT4_XATTR_PAD); /* Clear pad bytes. */
+                               memcpy(val, i->value, i->value_len);
+                               return 0;
+                       }
+
+                       /* Remove the old value. */
+                       memmove(first_val + size, first_val, val - first_val);
+                       memset(first_val, 0, size);
+                       s->here->e_value_size = 0;
+                       s->here->e_value_offs = 0;
+                       min_offs += size;
+
+                       /* Adjust all value offsets. */
+                       last = s->first;
+                       while (!IS_LAST_ENTRY(last)) {
+                               size_t o = le16_to_cpu(last->e_value_offs);
+                               if (!last->e_value_block &&
+                                   last->e_value_size && o < offs)
+                                       last->e_value_offs =
+                                               cpu_to_le16(o + size);
+                               last = EXT4_XATTR_NEXT(last);
+                       }
+               }
+               if (!i->value) {
+                       /* Remove the old name. */
+                       size_t size = EXT4_XATTR_LEN(name_len);
+                       last = ENTRY((void *)last - size);
+                       memmove(s->here, (void *)s->here + size,
+                               (void *)last - (void *)s->here + sizeof(__u32));
+                       memset(last, 0, size);
+               }
+       }
+
+       if (i->value) {
+               /* Insert the new value. */
+               s->here->e_value_size = cpu_to_le32(i->value_len);
+               if (i->value_len) {
+                       size_t size = EXT4_XATTR_SIZE(i->value_len);
+                       void *val = s->base + min_offs - size;
+                       s->here->e_value_offs = cpu_to_le16(min_offs - size);
+                       memset(val + size - EXT4_XATTR_PAD, 0,
+                              EXT4_XATTR_PAD); /* Clear the pad bytes. */
+                       memcpy(val, i->value, i->value_len);
+               }
+       }
+       return 0;
+}
+
+struct ext4_xattr_block_find {
+       struct ext4_xattr_search s;
+       struct buffer_head *bh;
+};
+
+static int
+ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
+                     struct ext4_xattr_block_find *bs)
+{
+       struct super_block *sb = inode->i_sb;
+       int error;
+
+       ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
+                 i->name_index, i->name, i->value, (long)i->value_len);
+
+       if (EXT4_I(inode)->i_file_acl) {
+               /* The inode already has an extended attribute block. */
+               bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
+               error = -EIO;
+               if (!bs->bh)
+                       goto cleanup;
+               ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
+                       atomic_read(&(bs->bh->b_count)),
+                       le32_to_cpu(BHDR(bs->bh)->h_refcount));
+               if (ext4_xattr_check_block(bs->bh)) {
+                       ext4_error(sb, __FUNCTION__,
+                               "inode %lu: bad block %llu", inode->i_ino,
+                               EXT4_I(inode)->i_file_acl);
+                       error = -EIO;
+                       goto cleanup;
+               }
+               /* Find the named attribute. */
+               bs->s.base = BHDR(bs->bh);
+               bs->s.first = BFIRST(bs->bh);
+               bs->s.end = bs->bh->b_data + bs->bh->b_size;
+               bs->s.here = bs->s.first;
+               error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
+                                             i->name, bs->bh->b_size, 1);
+               if (error && error != -ENODATA)
+                       goto cleanup;
+               bs->s.not_found = error;
+       }
+       error = 0;
+
+cleanup:
+       return error;
+}
+
+static int
+ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+                    struct ext4_xattr_info *i,
+                    struct ext4_xattr_block_find *bs)
+{
+       struct super_block *sb = inode->i_sb;
+       struct buffer_head *new_bh = NULL;
+       struct ext4_xattr_search *s = &bs->s;
+       struct mb_cache_entry *ce = NULL;
+       int error;
+
+#define header(x) ((struct ext4_xattr_header *)(x))
+
+       if (i->value && i->value_len > sb->s_blocksize)
+               return -ENOSPC;
+       if (s->base) {
+               ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
+                                       bs->bh->b_blocknr);
+               if (header(s->base)->h_refcount == cpu_to_le32(1)) {
+                       if (ce) {
+                               mb_cache_entry_free(ce);
+                               ce = NULL;
+                       }
+                       ea_bdebug(bs->bh, "modifying in-place");
+                       error = ext4_journal_get_write_access(handle, bs->bh);
+                       if (error)
+                               goto cleanup;
+                       lock_buffer(bs->bh);
+                       error = ext4_xattr_set_entry(i, s);
+                       if (!error) {
+                               if (!IS_LAST_ENTRY(s->first))
+                                       ext4_xattr_rehash(header(s->base),
+                                                         s->here);
+                               ext4_xattr_cache_insert(bs->bh);
+                       }
+                       unlock_buffer(bs->bh);
+                       if (error == -EIO)
+                               goto bad_block;
+                       if (!error)
+                               error = ext4_journal_dirty_metadata(handle,
+                                                                   bs->bh);
+                       if (error)
+                               goto cleanup;
+                       goto inserted;
+               } else {
+                       int offset = (char *)s->here - bs->bh->b_data;
+
+                       if (ce) {
+                               mb_cache_entry_release(ce);
+                               ce = NULL;
+                       }
+                       ea_bdebug(bs->bh, "cloning");
+                       s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
+                       error = -ENOMEM;
+                       if (s->base == NULL)
+                               goto cleanup;
+                       memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
+                       s->first = ENTRY(header(s->base)+1);
+                       header(s->base)->h_refcount = cpu_to_le32(1);
+                       s->here = ENTRY(s->base + offset);
+                       s->end = s->base + bs->bh->b_size;
+               }
+       } else {
+               /* Allocate a buffer where we construct the new block. */
+               s->base = kmalloc(sb->s_blocksize, GFP_KERNEL);
+               /* assert(header == s->base) */
+               error = -ENOMEM;
+               if (s->base == NULL)
+                       goto cleanup;
+               memset(s->base, 0, sb->s_blocksize);
+               header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+               header(s->base)->h_blocks = cpu_to_le32(1);
+               header(s->base)->h_refcount = cpu_to_le32(1);
+               s->first = ENTRY(header(s->base)+1);
+               s->here = ENTRY(header(s->base)+1);
+               s->end = s->base + sb->s_blocksize;
+       }
+
+       error = ext4_xattr_set_entry(i, s);
+       if (error == -EIO)
+               goto bad_block;
+       if (error)
+               goto cleanup;
+       if (!IS_LAST_ENTRY(s->first))
+               ext4_xattr_rehash(header(s->base), s->here);
+
+inserted:
+       if (!IS_LAST_ENTRY(s->first)) {
+               new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
+               if (new_bh) {
+                       /* We found an identical block in the cache. */
+                       if (new_bh == bs->bh)
+                               ea_bdebug(new_bh, "keeping");
+                       else {
+                               /* The old block is released after updating
+                                  the inode. */
+                               error = -EDQUOT;
+                               if (DQUOT_ALLOC_BLOCK(inode, 1))
+                                       goto cleanup;
+                               error = ext4_journal_get_write_access(handle,
+                                                                     new_bh);
+                               if (error)
+                                       goto cleanup_dquot;
+                               lock_buffer(new_bh);
+                               BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
+                                       le32_to_cpu(BHDR(new_bh)->h_refcount));
+                               ea_bdebug(new_bh, "reusing; refcount now=%d",
+                                       le32_to_cpu(BHDR(new_bh)->h_refcount));
+                               unlock_buffer(new_bh);
+                               error = ext4_journal_dirty_metadata(handle,
+                                                                   new_bh);
+                               if (error)
+                                       goto cleanup_dquot;
+                       }
+                       mb_cache_entry_release(ce);
+                       ce = NULL;
+               } else if (bs->bh && s->base == bs->bh->b_data) {
+                       /* We were modifying this block in-place. */
+                       ea_bdebug(bs->bh, "keeping this block");
+                       new_bh = bs->bh;
+                       get_bh(new_bh);
+               } else {
+                       /* We need to allocate a new block */
+                       ext4_fsblk_t goal = le32_to_cpu(
+                                       EXT4_SB(sb)->s_es->s_first_data_block) +
+                               (ext4_fsblk_t)EXT4_I(inode)->i_block_group *
+                               EXT4_BLOCKS_PER_GROUP(sb);
+                       ext4_fsblk_t block = ext4_new_block(handle, inode,
+                                                       goal, &error);
+                       if (error)
+                               goto cleanup;
+                       ea_idebug(inode, "creating block %llu",
+                                 (unsigned long long)block);
+
+                       new_bh = sb_getblk(sb, block);
+                       if (!new_bh) {
+getblk_failed:
+                               ext4_free_blocks(handle, inode, block, 1);
+                               error = -EIO;
+                               goto cleanup;
+                       }
+                       lock_buffer(new_bh);
+                       error = ext4_journal_get_create_access(handle, new_bh);
+                       if (error) {
+                               unlock_buffer(new_bh);
+                               goto getblk_failed;
+                       }
+                       memcpy(new_bh->b_data, s->base, new_bh->b_size);
+                       set_buffer_uptodate(new_bh);
+                       unlock_buffer(new_bh);
+                       ext4_xattr_cache_insert(new_bh);
+                       error = ext4_journal_dirty_metadata(handle, new_bh);
+                       if (error)
+                               goto cleanup;
+               }
+       }
+
+       /* Update the inode. */
+       EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
+
+       /* Drop the previous xattr block. */
+       if (bs->bh && bs->bh != new_bh)
+               ext4_xattr_release_block(handle, inode, bs->bh);
+       error = 0;
+
+cleanup:
+       if (ce)
+               mb_cache_entry_release(ce);
+       brelse(new_bh);
+       if (!(bs->bh && s->base == bs->bh->b_data))
+               kfree(s->base);
+
+       return error;
+
+cleanup_dquot:
+       DQUOT_FREE_BLOCK(inode, 1);
+       goto cleanup;
+
+bad_block:
+       ext4_error(inode->i_sb, __FUNCTION__,
+                  "inode %lu: bad block %llu", inode->i_ino,
+                  EXT4_I(inode)->i_file_acl);
+       goto cleanup;
+
+#undef header
+}
+
+struct ext4_xattr_ibody_find {
+       struct ext4_xattr_search s;
+       struct ext4_iloc iloc;
+};
+
+static int
+ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+                     struct ext4_xattr_ibody_find *is)
+{
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_inode *raw_inode;
+       int error;
+
+       if (EXT4_I(inode)->i_extra_isize == 0)
+               return 0;
+       raw_inode = ext4_raw_inode(&is->iloc);
+       header = IHDR(inode, raw_inode);
+       is->s.base = is->s.first = IFIRST(header);
+       is->s.here = is->s.first;
+       is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+       if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
+               error = ext4_xattr_check_names(IFIRST(header), is->s.end);
+               if (error)
+                       return error;
+               /* Find the named attribute. */
+               error = ext4_xattr_find_entry(&is->s.here, i->name_index,
+                                             i->name, is->s.end -
+                                             (void *)is->s.base, 0);
+               if (error && error != -ENODATA)
+                       return error;
+               is->s.not_found = error;
+       }
+       return 0;
+}
+
+static int
+ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+                    struct ext4_xattr_info *i,
+                    struct ext4_xattr_ibody_find *is)
+{
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_xattr_search *s = &is->s;
+       int error;
+
+       if (EXT4_I(inode)->i_extra_isize == 0)
+               return -ENOSPC;
+       error = ext4_xattr_set_entry(i, s);
+       if (error)
+               return error;
+       header = IHDR(inode, ext4_raw_inode(&is->iloc));
+       if (!IS_LAST_ENTRY(s->first)) {
+               header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+               EXT4_I(inode)->i_state |= EXT4_STATE_XATTR;
+       } else {
+               header->h_magic = cpu_to_le32(0);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR;
+       }
+       return 0;
+}
+
+/*
+ * ext4_xattr_set_handle()
+ *
+ * Create, replace or remove an extended attribute for this inode.  Value
+ * is NULL to remove an existing extended attribute, and non-NULL to
+ * either replace an existing extended attribute, or create a new extended
+ * attribute.  The flags XATTR_REPLACE and XATTR_CREATE specify that an
+ * extended attribute must exist and must not exist prior to the call,
+ * respectively.
+ *
+ * Returns 0, or a negative error number on failure.
+ */
+int
+ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
+                     const char *name, const void *value, size_t value_len,
+                     int flags)
+{
+       struct ext4_xattr_info i = {
+               .name_index = name_index,
+               .name = name,
+               .value = value,
+               .value_len = value_len,
+       };
+       struct ext4_xattr_ibody_find is = {
+               .s = { .not_found = -ENODATA, },
+       };
+       struct ext4_xattr_block_find bs = {
+               .s = { .not_found = -ENODATA, },
+       };
+       int error;
+
+       if (!name)
+               return -EINVAL;
+       if (strlen(name) > 255)
+               return -ERANGE;
+       down_write(&EXT4_I(inode)->xattr_sem);
+       error = ext4_get_inode_loc(inode, &is.iloc);
+       if (error)
+               goto cleanup;
+
+       if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
+               struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
+               memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW;
+       }
+
+       error = ext4_xattr_ibody_find(inode, &i, &is);
+       if (error)
+               goto cleanup;
+       if (is.s.not_found)
+               error = ext4_xattr_block_find(inode, &i, &bs);
+       if (error)
+               goto cleanup;
+       if (is.s.not_found && bs.s.not_found) {
+               error = -ENODATA;
+               if (flags & XATTR_REPLACE)
+                       goto cleanup;
+               error = 0;
+               if (!value)
+                       goto cleanup;
+       } else {
+               error = -EEXIST;
+               if (flags & XATTR_CREATE)
+                       goto cleanup;
+       }
+       error = ext4_journal_get_write_access(handle, is.iloc.bh);
+       if (error)
+               goto cleanup;
+       if (!value) {
+               if (!is.s.not_found)
+                       error = ext4_xattr_ibody_set(handle, inode, &i, &is);
+               else if (!bs.s.not_found)
+                       error = ext4_xattr_block_set(handle, inode, &i, &bs);
+       } else {
+               error = ext4_xattr_ibody_set(handle, inode, &i, &is);
+               if (!error && !bs.s.not_found) {
+                       i.value = NULL;
+                       error = ext4_xattr_block_set(handle, inode, &i, &bs);
+               } else if (error == -ENOSPC) {
+                       error = ext4_xattr_block_set(handle, inode, &i, &bs);
+                       if (error)
+                               goto cleanup;
+                       if (!is.s.not_found) {
+                               i.value = NULL;
+                               error = ext4_xattr_ibody_set(handle, inode, &i,
+                                                            &is);
+                       }
+               }
+       }
+       if (!error) {
+               ext4_xattr_update_super_block(handle, inode->i_sb);
+               inode->i_ctime = CURRENT_TIME_SEC;
+               error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+               /*
+                * The bh is consumed by ext4_mark_iloc_dirty, even with
+                * error != 0.
+                */
+               is.iloc.bh = NULL;
+               if (IS_SYNC(inode))
+                       handle->h_sync = 1;
+       }
+
+cleanup:
+       brelse(is.iloc.bh);
+       brelse(bs.bh);
+       up_write(&EXT4_I(inode)->xattr_sem);
+       return error;
+}
+
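
The XATTR_CREATE/XATTR_REPLACE semantics documented above are exactly what
userspace sees through setxattr(2); a minimal sketch from the calling side
(the path is hypothetical, error handling is reduced to perror()):

	#include <stdio.h>
	#include <string.h>
	#include <sys/xattr.h>

	int main(void)
	{
		const char *path = "/mnt/ext4/file";	/* hypothetical */
		const char *val = "text/plain";

		/* XATTR_CREATE fails with EEXIST if the attribute
		 * already exists... */
		if (setxattr(path, "user.mime_type", val, strlen(val),
			     XATTR_CREATE))
			perror("XATTR_CREATE");

		/* ...and XATTR_REPLACE fails with ENODATA if it does
		 * not exist yet. */
		if (setxattr(path, "user.mime_type", val, strlen(val),
			     XATTR_REPLACE))
			perror("XATTR_REPLACE");
		return 0;
	}
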
+/*
+ * ext4_xattr_set()
+ *
+ * Like ext4_xattr_set_handle, but start from an inode. This extended
+ * attribute modification is a filesystem transaction by itself.
+ *
+ * Returns 0, or a negative error number on failure.
+ */
+int
+ext4_xattr_set(struct inode *inode, int name_index, const char *name,
+              const void *value, size_t value_len, int flags)
+{
+       handle_t *handle;
+       int error, retries = 0;
+
+retry:
+       handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+       if (IS_ERR(handle)) {
+               error = PTR_ERR(handle);
+       } else {
+               int error2;
+
+               error = ext4_xattr_set_handle(handle, inode, name_index, name,
+                                             value, value_len, flags);
+               error2 = ext4_journal_stop(handle);
+               if (error == -ENOSPC &&
+                   ext4_should_retry_alloc(inode->i_sb, &retries))
+                       goto retry;
+               if (error == 0)
+                       error = error2;
+       }
+
+       return error;
+}
+
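
Inside the kernel this is the one-call entry point that the per-namespace
handlers at the end of this patch funnel into, and the ENOSPC retry above is
why those callers need no loop of their own. A hedged sketch of the call
shape (the attribute shown is illustrative, not an actual call site):

	/* Illustrative only: store a "user." attribute from kernel
	 * code, letting ext4_xattr_set() start, stop and, on ENOSPC,
	 * retry its own transaction. */
	int err = ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER,
				 "mime_type", "text/plain", 10,
				 XATTR_CREATE);
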
+/*
+ * ext4_xattr_delete_inode()
+ *
+ * Free extended attribute resources associated with this inode. This
+ * is called immediately before an inode is freed. We have exclusive
+ * access to the inode.
+ */
+void
+ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
+{
+       struct buffer_head *bh = NULL;
+
+       if (!EXT4_I(inode)->i_file_acl)
+               goto cleanup;
+       bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
+       if (!bh) {
+               ext4_error(inode->i_sb, __FUNCTION__,
+                       "inode %lu: block %llu read error", inode->i_ino,
+                       EXT4_I(inode)->i_file_acl);
+               goto cleanup;
+       }
+       if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
+           BHDR(bh)->h_blocks != cpu_to_le32(1)) {
+               ext4_error(inode->i_sb, __FUNCTION__,
+                       "inode %lu: bad block %llu", inode->i_ino,
+                       EXT4_I(inode)->i_file_acl);
+               goto cleanup;
+       }
+       ext4_xattr_release_block(handle, inode, bh);
+       EXT4_I(inode)->i_file_acl = 0;
+
+cleanup:
+       brelse(bh);
+}
+
+/*
+ * ext4_xattr_put_super()
+ *
+ * This is called when a file system is unmounted.
+ */
+void
+ext4_xattr_put_super(struct super_block *sb)
+{
+       mb_cache_shrink(sb->s_bdev);
+}
+
+/*
+ * ext4_xattr_cache_insert()
+ *
+ * Create a new entry in the extended attribute cache, and insert
+ * it unless such an entry is already in the cache.
+ *
+ * The function returns nothing; errors are ignored, since a failed
+ * insert only costs future block sharing, never correctness.
+ */
+static void
+ext4_xattr_cache_insert(struct buffer_head *bh)
+{
+       __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+       struct mb_cache_entry *ce;
+       int error;
+
+       ce = mb_cache_entry_alloc(ext4_xattr_cache);
+       if (!ce) {
+               ea_bdebug(bh, "out of memory");
+               return;
+       }
+       error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
+       if (error) {
+               mb_cache_entry_free(ce);
+               if (error == -EBUSY) {
+                       ea_bdebug(bh, "already in cache");
+                       error = 0;
+               }
+       } else {
+               ea_bdebug(bh, "inserting [%x]", (int)hash);
+               mb_cache_entry_release(ce);
+       }
+}
+
+/*
+ * ext4_xattr_cmp()
+ *
+ * Compare two extended attribute blocks for equality.
+ *
+ * Returns 0 if the blocks are equal, 1 if they differ, and
+ * a negative error number on errors.
+ */
+static int
+ext4_xattr_cmp(struct ext4_xattr_header *header1,
+              struct ext4_xattr_header *header2)
+{
+       struct ext4_xattr_entry *entry1, *entry2;
+
+       entry1 = ENTRY(header1+1);
+       entry2 = ENTRY(header2+1);
+       while (!IS_LAST_ENTRY(entry1)) {
+               if (IS_LAST_ENTRY(entry2))
+                       return 1;
+               if (entry1->e_hash != entry2->e_hash ||
+                   entry1->e_name_index != entry2->e_name_index ||
+                   entry1->e_name_len != entry2->e_name_len ||
+                   entry1->e_value_size != entry2->e_value_size ||
+                   memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
+                       return 1;
+               if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
+                       return -EIO;
+               if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
+                          (char *)header2 + le16_to_cpu(entry2->e_value_offs),
+                          le32_to_cpu(entry1->e_value_size)))
+                       return 1;
+
+               entry1 = EXT4_XATTR_NEXT(entry1);
+               entry2 = EXT4_XATTR_NEXT(entry2);
+       }
+       if (!IS_LAST_ENTRY(entry2))
+               return 1;
+       return 0;
+}
+
+/*
+ * ext4_xattr_cache_find()
+ *
+ * Find an identical extended attribute block.
+ *
+ * Returns a pointer to the block found, or NULL if such a block was
+ * not found or an error occurred.
+ */
+static struct buffer_head *
+ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
+                     struct mb_cache_entry **pce)
+{
+       __u32 hash = le32_to_cpu(header->h_hash);
+       struct mb_cache_entry *ce;
+
+       if (!header->h_hash)
+               return NULL;  /* never share */
+       ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
+again:
+       ce = mb_cache_entry_find_first(ext4_xattr_cache, 0,
+                                      inode->i_sb->s_bdev, hash);
+       while (ce) {
+               struct buffer_head *bh;
+
+               if (IS_ERR(ce)) {
+                       if (PTR_ERR(ce) == -EAGAIN)
+                               goto again;
+                       break;
+               }
+               bh = sb_bread(inode->i_sb, ce->e_block);
+               if (!bh) {
+                       ext4_error(inode->i_sb, __FUNCTION__,
+                               "inode %lu: block %lu read error",
+                               inode->i_ino, (unsigned long) ce->e_block);
+               } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
+                               EXT4_XATTR_REFCOUNT_MAX) {
+                       ea_idebug(inode, "block %lu refcount %d>=%d",
+                                 (unsigned long) ce->e_block,
+                                 le32_to_cpu(BHDR(bh)->h_refcount),
+                                         EXT4_XATTR_REFCOUNT_MAX);
+               } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
+                       *pce = ce;
+                       return bh;
+               }
+               brelse(bh);
+               ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
+       }
+       return NULL;
+}
+
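
ext4_xattr_cmp() and the two cache routines around it add up to block-level
deduplication: before writing out a new attribute block, the setter asks the
cache for a byte-identical block and, if one exists with fewer than
EXT4_XATTR_REFCOUNT_MAX holders, shares it by bumping h_refcount instead of
allocating. A condensed sketch of that decision, simplified from the real
ext4_xattr_block_set() call site (which also journals the buffer and
handles quota):

	new_bh = ext4_xattr_cache_find(inode, header, &ce);
	if (new_bh) {
		/* identical block found: take one more reference */
		BHDR(new_bh)->h_refcount = cpu_to_le32(
			le32_to_cpu(BHDR(new_bh)->h_refcount) + 1);
	} else {
		/* no match: allocate a fresh block, then make it
		 * findable via ext4_xattr_cache_insert() */
	}
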
+#define NAME_HASH_SHIFT 5
+#define VALUE_HASH_SHIFT 16
+
+/*
+ * ext4_xattr_hash_entry()
+ *
+ * Compute the hash of an extended attribute.
+ */
+static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
+                                        struct ext4_xattr_entry *entry)
+{
+       __u32 hash = 0;
+       char *name = entry->e_name;
+       int n;
+
+       for (n=0; n < entry->e_name_len; n++) {
+               hash = (hash << NAME_HASH_SHIFT) ^
+                      (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
+                      *name++;
+       }
+
+       if (entry->e_value_block == 0 && entry->e_value_size != 0) {
+               __le32 *value = (__le32 *)((char *)header +
+                       le16_to_cpu(entry->e_value_offs));
+               for (n = (le32_to_cpu(entry->e_value_size) +
+                    EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
+                       hash = (hash << VALUE_HASH_SHIFT) ^
+                              (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+                              le32_to_cpu(*value++);
+               }
+       }
+       entry->e_hash = cpu_to_le32(hash);
+}
+
+#undef NAME_HASH_SHIFT
+#undef VALUE_HASH_SHIFT
+
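
The hash above is a plain rotate-and-xor: five bits of rotation per name
byte, then (for in-block values) sixteen bits per little-endian 32-bit word.
A self-contained userspace replica of the name part, handy for experimenting
with collisions (note the kernel xors plain char, so bytes >= 0x80 could
differ on signed-char ABIs; attribute names are ASCII in practice):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define NAME_HASH_SHIFT 5

	/* Userspace replica of the name loop in ext4_xattr_hash_entry(). */
	static uint32_t xattr_name_hash(const char *name, size_t len)
	{
		uint32_t hash = 0;
		size_t n;

		for (n = 0; n < len; n++)
			hash = (hash << NAME_HASH_SHIFT) ^
			       (hash >> (8 * sizeof(hash) - NAME_HASH_SHIFT)) ^
			       (uint8_t)name[n];
		return hash;
	}

	int main(void)
	{
		const char *name = "mime_type";

		printf("%s -> %#x\n", name,
		       xattr_name_hash(name, strlen(name)));
		return 0;
	}
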
+#define BLOCK_HASH_SHIFT 16
+
+/*
+ * ext4_xattr_rehash()
+ *
+ * Re-compute the extended attribute hash value after an entry has changed.
+ */
+static void ext4_xattr_rehash(struct ext4_xattr_header *header,
+                             struct ext4_xattr_entry *entry)
+{
+       struct ext4_xattr_entry *here;
+       __u32 hash = 0;
+
+       ext4_xattr_hash_entry(header, entry);
+       here = ENTRY(header+1);
+       while (!IS_LAST_ENTRY(here)) {
+               if (!here->e_hash) {
+                       /* Block is not shared if an entry's hash value == 0 */
+                       hash = 0;
+                       break;
+               }
+               hash = (hash << BLOCK_HASH_SHIFT) ^
+                      (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
+                      le32_to_cpu(here->e_hash);
+               here = EXT4_XATTR_NEXT(here);
+       }
+       header->h_hash = cpu_to_le32(hash);
+}
+
+#undef BLOCK_HASH_SHIFT
+
+int __init
+init_ext4_xattr(void)
+{
+       ext4_xattr_cache = mb_cache_create("ext4_xattr", NULL,
+               sizeof(struct mb_cache_entry) +
+               sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
+       if (!ext4_xattr_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void
+exit_ext4_xattr(void)
+{
+       if (ext4_xattr_cache)
+               mb_cache_destroy(ext4_xattr_cache);
+       ext4_xattr_cache = NULL;
+}
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
new file mode 100644 (file)
index 0000000..79432b3
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+  File: fs/ext4/xattr.h
+
+  On-disk format of extended attributes for the ext4 filesystem.
+
+  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+
+#include <linux/xattr.h>
+
+/* Magic value in attribute blocks */
+#define EXT4_XATTR_MAGIC               0xEA020000
+
+/* Maximum number of references to one attribute block */
+#define EXT4_XATTR_REFCOUNT_MAX                1024
+
+/* Name indexes */
+#define EXT4_XATTR_INDEX_USER                  1
+#define EXT4_XATTR_INDEX_POSIX_ACL_ACCESS      2
+#define EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT     3
+#define EXT4_XATTR_INDEX_TRUSTED               4
+#define EXT4_XATTR_INDEX_LUSTRE                5
+#define EXT4_XATTR_INDEX_SECURITY              6
+
+struct ext4_xattr_header {
+       __le32  h_magic;        /* magic number for identification */
+       __le32  h_refcount;     /* reference count */
+       __le32  h_blocks;       /* number of disk blocks used */
+       __le32  h_hash;         /* hash value of all attributes */
+       __u32   h_reserved[4];  /* zero right now */
+};
+
+struct ext4_xattr_ibody_header {
+       __le32  h_magic;        /* magic number for identification */
+};
+
+struct ext4_xattr_entry {
+       __u8    e_name_len;     /* length of name */
+       __u8    e_name_index;   /* attribute name index */
+       __le16  e_value_offs;   /* offset in disk block of value */
+       __le32  e_value_block;  /* disk block attribute is stored on (n/i) */
+       __le32  e_value_size;   /* size of attribute value */
+       __le32  e_hash;         /* hash value of name and value */
+       char    e_name[0];      /* attribute name */
+};
+
+#define EXT4_XATTR_PAD_BITS            2
+#define EXT4_XATTR_PAD         (1<<EXT4_XATTR_PAD_BITS)
+#define EXT4_XATTR_ROUND               (EXT4_XATTR_PAD-1)
+#define EXT4_XATTR_LEN(name_len) \
+       (((name_len) + EXT4_XATTR_ROUND + \
+       sizeof(struct ext4_xattr_entry)) & ~EXT4_XATTR_ROUND)
+#define EXT4_XATTR_NEXT(entry) \
+       ( (struct ext4_xattr_entry *)( \
+         (char *)(entry) + EXT4_XATTR_LEN((entry)->e_name_len)) )
+#define EXT4_XATTR_SIZE(size) \
+       (((size) + EXT4_XATTR_ROUND) & ~EXT4_XATTR_ROUND)
+
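
The entry header above is 16 bytes (1+1+2+4+4+4) and the macros pad both
names and values to 4-byte boundaries. A hedged sanity check of that
arithmetic, with the struct and macros re-declared locally so it compiles
on its own (assumes the usual ABI packs the struct with no holes, which the
field sizes guarantee here):

	#include <assert.h>
	#include <stdint.h>

	/* Local re-declaration for illustration only. */
	struct xattr_entry {
		uint8_t  e_name_len;
		uint8_t  e_name_index;
		uint16_t e_value_offs;
		uint32_t e_value_block;
		uint32_t e_value_size;
		uint32_t e_hash;
	};

	#define PAD_BITS 2
	#define ROUND    ((1 << PAD_BITS) - 1)
	#define XATTR_LEN(name_len) \
		(((name_len) + ROUND + sizeof(struct xattr_entry)) & ~ROUND)
	#define XATTR_SIZE(size) (((size) + ROUND) & ~ROUND)

	int main(void)
	{
		assert(sizeof(struct xattr_entry) == 16);
		assert(XATTR_LEN(4) == 20);	/* 16 + 4, already aligned */
		assert(XATTR_LEN(5) == 24);	/* 16 + 5, padded up */
		assert(XATTR_SIZE(6) == 8);	/* values pad to 4 bytes */
		return 0;
	}
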
+# ifdef CONFIG_EXT4DEV_FS_XATTR
+
+extern struct xattr_handler ext4_xattr_user_handler;
+extern struct xattr_handler ext4_xattr_trusted_handler;
+extern struct xattr_handler ext4_xattr_acl_access_handler;
+extern struct xattr_handler ext4_xattr_acl_default_handler;
+extern struct xattr_handler ext4_xattr_security_handler;
+
+extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
+
+extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
+extern int ext4_xattr_list(struct inode *, char *, size_t);
+extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
+extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
+
+extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
+extern void ext4_xattr_put_super(struct super_block *);
+
+extern int init_ext4_xattr(void);
+extern void exit_ext4_xattr(void);
+
+extern struct xattr_handler *ext4_xattr_handlers[];
+
+# else  /* CONFIG_EXT4DEV_FS_XATTR */
+
+static inline int
+ext4_xattr_get(struct inode *inode, int name_index, const char *name,
+              void *buffer, size_t size, int flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ext4_xattr_list(struct inode *inode, void *buffer, size_t size)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ext4_xattr_set(struct inode *inode, int name_index, const char *name,
+              const void *value, size_t size, int flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
+              const char *name, const void *value, size_t size, int flags)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void
+ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
+{
+}
+
+static inline void
+ext4_xattr_put_super(struct super_block *sb)
+{
+}
+
+static inline int
+init_ext4_xattr(void)
+{
+       return 0;
+}
+
+static inline void
+exit_ext4_xattr(void)
+{
+}
+
+#define ext4_xattr_handlers    NULL
+
+# endif  /* CONFIG_EXT4DEV_FS_XATTR */
+
+#ifdef CONFIG_EXT4DEV_FS_SECURITY
+extern int ext4_init_security(handle_t *handle, struct inode *inode,
+                               struct inode *dir);
+#else
+static inline int ext4_init_security(handle_t *handle, struct inode *inode,
+                               struct inode *dir)
+{
+       return 0;
+}
+#endif
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
new file mode 100644 (file)
index 0000000..b6a6861
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * linux/fs/ext4/xattr_security.c
+ * Handler for storing security labels as extended attributes.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/ext4_fs.h>
+#include <linux/security.h>
+#include "xattr.h"
+
+static size_t
+ext4_xattr_security_list(struct inode *inode, char *list, size_t list_size,
+                        const char *name, size_t name_len)
+{
+       const size_t prefix_len = sizeof(XATTR_SECURITY_PREFIX)-1;
+       const size_t total_len = prefix_len + name_len + 1;
+
+       if (list && total_len <= list_size) {
+               memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
+               memcpy(list+prefix_len, name, name_len);
+               list[prefix_len + name_len] = '\0';
+       }
+       return total_len;
+}
+
+static int
+ext4_xattr_security_get(struct inode *inode, const char *name,
+                      void *buffer, size_t size)
+{
+       if (strcmp(name, "") == 0)
+               return -EINVAL;
+       return ext4_xattr_get(inode, EXT4_XATTR_INDEX_SECURITY, name,
+                             buffer, size);
+}
+
+static int
+ext4_xattr_security_set(struct inode *inode, const char *name,
+                      const void *value, size_t size, int flags)
+{
+       if (strcmp(name, "") == 0)
+               return -EINVAL;
+       return ext4_xattr_set(inode, EXT4_XATTR_INDEX_SECURITY, name,
+                             value, size, flags);
+}
+
+int
+ext4_init_security(handle_t *handle, struct inode *inode, struct inode *dir)
+{
+       int err;
+       size_t len;
+       void *value;
+       char *name;
+
+       err = security_inode_init_security(inode, dir, &name, &value, &len);
+       if (err) {
+               if (err == -EOPNOTSUPP)
+                       return 0;
+               return err;
+       }
+       err = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_SECURITY,
+                                   name, value, len, 0);
+       kfree(name);
+       kfree(value);
+       return err;
+}
+
+struct xattr_handler ext4_xattr_security_handler = {
+       .prefix = XATTR_SECURITY_PREFIX,
+       .list   = ext4_xattr_security_list,
+       .get    = ext4_xattr_security_get,
+       .set    = ext4_xattr_security_set,
+};
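
A handler's .list callback, like the one above, appends one prefixed,
NUL-terminated name per attribute to the buffer that ext4_listxattr()
returns; userspace then walks that NUL-separated list. A minimal reader
(hypothetical path, error handling trimmed):

	#include <stdio.h>
	#include <string.h>
	#include <sys/xattr.h>

	int main(void)
	{
		char buf[1024];
		ssize_t len, i;

		len = listxattr("/mnt/ext4/file", buf, sizeof(buf));
		for (i = 0; i < len; i += strlen(buf + i) + 1)
			printf("%s\n", buf + i); /* e.g. security.selinux */
		return 0;
	}
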
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
new file mode 100644 (file)
index 0000000..b76f2db
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * linux/fs/ext4/xattr_trusted.c
+ * Handler for trusted extended attributes.
+ *
+ * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/capability.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/ext4_fs.h>
+#include "xattr.h"
+
+#define XATTR_TRUSTED_PREFIX "trusted."
+
+static size_t
+ext4_xattr_trusted_list(struct inode *inode, char *list, size_t list_size,
+                       const char *name, size_t name_len)
+{
+       const size_t prefix_len = sizeof(XATTR_TRUSTED_PREFIX)-1;
+       const size_t total_len = prefix_len + name_len + 1;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return 0;
+
+       if (list && total_len <= list_size) {
+               memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
+               memcpy(list+prefix_len, name, name_len);
+               list[prefix_len + name_len] = '\0';
+       }
+       return total_len;
+}
+
+static int
+ext4_xattr_trusted_get(struct inode *inode, const char *name,
+                      void *buffer, size_t size)
+{
+       if (strcmp(name, "") == 0)
+               return -EINVAL;
+       return ext4_xattr_get(inode, EXT4_XATTR_INDEX_TRUSTED, name,
+                             buffer, size);
+}
+
+static int
+ext4_xattr_trusted_set(struct inode *inode, const char *name,
+                      const void *value, size_t size, int flags)
+{
+       if (strcmp(name, "") == 0)
+               return -EINVAL;
+       return ext4_xattr_set(inode, EXT4_XATTR_INDEX_TRUSTED, name,
+                             value, size, flags);
+}
+
+struct xattr_handler ext4_xattr_trusted_handler = {
+       .prefix = XATTR_TRUSTED_PREFIX,
+       .list   = ext4_xattr_trusted_list,
+       .get    = ext4_xattr_trusted_get,
+       .set    = ext4_xattr_trusted_set,
+};
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
new file mode 100644 (file)
index 0000000..c53cded
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * linux/fs/ext4/xattr_user.c
+ * Handler for extended user attributes.
+ *
+ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/ext4_jbd2.h>
+#include <linux/ext4_fs.h>
+#include "xattr.h"
+
+#define XATTR_USER_PREFIX "user."
+
+static size_t
+ext4_xattr_user_list(struct inode *inode, char *list, size_t list_size,
+                    const char *name, size_t name_len)
+{
+       const size_t prefix_len = sizeof(XATTR_USER_PREFIX)-1;
+       const size_t total_len = prefix_len + name_len + 1;
+
+       if (!test_opt(inode->i_sb, XATTR_USER))
+               return 0;
+
+       if (list && total_len <= list_size) {
+               memcpy(list, XATTR_USER_PREFIX, prefix_len);
+               memcpy(list+prefix_len, name, name_len);
+               list[prefix_len + name_len] = '\0';
+       }
+       return total_len;
+}
+
+static int
+ext4_xattr_user_get(struct inode *inode, const char *name,
+                   void *buffer, size_t size)
+{
+       if (strcmp(name, "") == 0)
+               return -EINVAL;
+       if (!test_opt(inode->i_sb, XATTR_USER))
+               return -EOPNOTSUPP;
+       return ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, name, buffer, size);
+}
+
+static int
+ext4_xattr_user_set(struct inode *inode, const char *name,
+                   const void *value, size_t size, int flags)
+{
+       if (strcmp(name, "") == 0)
+               return -EINVAL;
+       if (!test_opt(inode->i_sb, XATTR_USER))
+               return -EOPNOTSUPP;
+       return ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, name,
+                             value, size, flags);
+}
+
+struct xattr_handler ext4_xattr_user_handler = {
+       .prefix = XATTR_USER_PREFIX,
+       .list   = ext4_xattr_user_list,
+       .get    = ext4_xattr_user_get,
+       .set    = ext4_xattr_user_set,
+};
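
All three handlers in this patch share the same shape: the generic xattr
code matches the user-supplied name against each handler's .prefix and
calls get/set with the prefix already stripped, which is why the routines
above reject an empty remaining name. A self-contained model of that
dispatch (the types and names here are illustrative, not the VFS's own):

	#include <stdio.h>
	#include <string.h>

	struct handler { const char *prefix; };

	static const struct handler handlers[] = {
		{ "user." }, { "trusted." }, { "security." },
	};

	/* Return the matching handler and the name after its prefix. */
	static const struct handler *dispatch(const char *name,
					      const char **rest)
	{
		size_t i, n;

		for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
			n = strlen(handlers[i].prefix);
			if (strncmp(name, handlers[i].prefix, n) == 0) {
				*rest = name + n;
				return &handlers[i];
			}
		}
		return NULL;
	}

	int main(void)
	{
		const char *rest;

		if (dispatch("user.mime_type", &rest))
			printf("suffix: %s\n", rest);	/* "mime_type" */
		return 0;
	}
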
index 4613cb2021700c5a4bb23c01e038ff9baa6c9b50..78945b53b0f827fed88da656666ec1dd6fa9aaa9 100644 (file)
@@ -1472,7 +1472,7 @@ int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2)
                ret = writeback_inode(i1);
        if (!ret && i2)
                ret = writeback_inode(i2);
-       if (!ret && sb) {
+       if (!ret) {
                struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
                ret = filemap_flush(mapping);
        }
index 8605155db171359398c952d8319c551a95fb305b..cfc8f81e60d0133a060a9d5374fc399820de9c9a 100644 (file)
@@ -138,6 +138,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
                struct fuse_entry_out outarg;
                struct fuse_conn *fc;
                struct fuse_req *req;
+               struct dentry *parent;
 
                /* Doesn't hurt to "reset" the validity timeout */
                fuse_invalidate_entry_cache(entry);
@@ -151,8 +152,10 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
                if (IS_ERR(req))
                        return 0;
 
-               fuse_lookup_init(req, entry->d_parent->d_inode, entry, &outarg);
+               parent = dget_parent(entry);
+               fuse_lookup_init(req, parent->d_inode, entry, &outarg);
                request_send(fc, req);
+               dput(parent);
                err = req->out.h.error;
                /* Zero nodeid is same as -ENOENT */
                if (!err && !outarg.nodeid)
@@ -163,7 +166,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
                                fuse_send_forget(fc, req, outarg.nodeid, 1);
                                return 0;
                        }
+                       spin_lock(&fc->lock);
                        fi->nlookup ++;
+                       spin_unlock(&fc->lock);
                }
                fuse_put_request(fc, req);
                if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
@@ -175,22 +180,6 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
        return 1;
 }
 
-/*
- * Check if there's already a hashed alias of this directory inode.
- * If yes, then lookup and mkdir must not create a new alias.
- */
-static int dir_alias(struct inode *inode)
-{
-       if (S_ISDIR(inode->i_mode)) {
-               struct dentry *alias = d_find_alias(inode);
-               if (alias) {
-                       dput(alias);
-                       return 1;
-               }
-       }
-       return 0;
-}
-
 static int invalid_nodeid(u64 nodeid)
 {
        return !nodeid || nodeid == FUSE_ROOT_ID;
@@ -206,6 +195,24 @@ static int valid_mode(int m)
                S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
 }
 
+/*
+ * Add a directory inode to a dentry, ensuring that no other dentry
+ * refers to this inode.  Called with fc->inst_mutex.
+ */
+static int fuse_d_add_directory(struct dentry *entry, struct inode *inode)
+{
+       struct dentry *alias = d_find_alias(inode);
+       if (alias) {
+               /* This tries to shrink the subtree below alias */
+               fuse_invalidate_entry(alias);
+               dput(alias);
+               if (!list_empty(&inode->i_dentry))
+                       return -EBUSY;
+       }
+       d_add(entry, inode);
+       return 0;
+}
+
 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
                                  struct nameidata *nd)
 {
@@ -241,11 +248,17 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        if (err && err != -ENOENT)
                return ERR_PTR(err);
 
-       if (inode && dir_alias(inode)) {
-               iput(inode);
-               return ERR_PTR(-EIO);
-       }
-       d_add(entry, inode);
+       if (inode && S_ISDIR(inode->i_mode)) {
+               mutex_lock(&fc->inst_mutex);
+               err = fuse_d_add_directory(entry, inode);
+               mutex_unlock(&fc->inst_mutex);
+               if (err) {
+                       iput(inode);
+                       return ERR_PTR(err);
+               }
+       } else
+               d_add(entry, inode);
+
        entry->d_op = &fuse_dentry_operations;
        if (!err)
                fuse_change_timeout(entry, &outarg);
@@ -401,12 +414,22 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
        }
        fuse_put_request(fc, req);
 
-       if (dir_alias(inode)) {
-               iput(inode);
-               return -EIO;
-       }
+       if (S_ISDIR(inode->i_mode)) {
+               struct dentry *alias;
+               mutex_lock(&fc->inst_mutex);
+               alias = d_find_alias(inode);
+               if (alias) {
+                       /* New directory must have moved since mkdir */
+                       mutex_unlock(&fc->inst_mutex);
+                       dput(alias);
+                       iput(inode);
+                       return -EBUSY;
+               }
+               d_instantiate(entry, inode);
+               mutex_unlock(&fc->inst_mutex);
+       } else
+               d_instantiate(entry, inode);
 
-       d_instantiate(entry, inode);
        fuse_change_timeout(entry, &outarg);
        fuse_invalidate_attr(dir);
        return 0;
@@ -935,14 +958,30 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
        }
 }
 
+static void fuse_vmtruncate(struct inode *inode, loff_t offset)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       int need_trunc;
+
+       spin_lock(&fc->lock);
+       need_trunc = inode->i_size > offset;
+       i_size_write(inode, offset);
+       spin_unlock(&fc->lock);
+
+       if (need_trunc) {
+               struct address_space *mapping = inode->i_mapping;
+               unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+               truncate_inode_pages(mapping, offset);
+       }
+}
+
 /*
  * Set attributes, and at the same time refresh them.
  *
  * Truncation is slightly complicated, because the 'truncate' request
  * may fail, in which case we don't want to touch the mapping.
- * vmtruncate() doesn't allow for this case.  So do the rlimit
- * checking by hand and call vmtruncate() only after the file has
- * actually been truncated.
+ * vmtruncate() doesn't allow for this case, so do the rlimit checking
+ * and the actual truncation by hand.
  */
 static int fuse_setattr(struct dentry *entry, struct iattr *attr)
 {
@@ -993,12 +1032,8 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
                        make_bad_inode(inode);
                        err = -EIO;
                } else {
-                       if (is_truncate) {
-                               loff_t origsize = i_size_read(inode);
-                               i_size_write(inode, outarg.attr.size);
-                               if (origsize > outarg.attr.size)
-                                       vmtruncate(inode, outarg.attr.size);
-                       }
+                       if (is_truncate)
+                               fuse_vmtruncate(inode, outarg.attr.size);
                        fuse_change_attributes(inode, &outarg.attr);
                        fi->i_time = time_to_jiffies(outarg.attr_valid,
                                                     outarg.attr_valid_nsec);
index 183626868eea602d4658e5395ec31f46130c0afe..2bb5ace3882dd9e5bcdc6e92af17e5fce873365e 100644 (file)
@@ -481,8 +481,10 @@ static int fuse_commit_write(struct file *file, struct page *page,
                err = -EIO;
        if (!err) {
                pos += count;
-               if (pos > i_size_read(inode))
+               spin_lock(&fc->lock);
+               if (pos > inode->i_size)
                        i_size_write(inode, pos);
+               spin_unlock(&fc->lock);
 
                if (offset == 0 && to == PAGE_CACHE_SIZE) {
                        clear_page_dirty(page);
@@ -586,8 +588,12 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
        }
        fuse_put_request(fc, req);
        if (res > 0) {
-               if (write && pos > i_size_read(inode))
-                       i_size_write(inode, pos);
+               if (write) {
+                       spin_lock(&fc->lock);
+                       if (pos > inode->i_size)
+                               i_size_write(inode, pos);
+                       spin_unlock(&fc->lock);
+               }
                *ppos = pos;
        }
        fuse_invalidate_attr(inode);
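
Both hunks above wrap the i_size update in fc->lock for a reason:
i_size_write() is only safe against concurrent i_size_read() if writers are
externally serialized (on 32-bit SMP it is a seqcount write), so FUSE
designates fc->lock as that writer lock. The pattern, isolated from the
hunks:

	spin_lock(&fc->lock);
	if (pos > inode->i_size)    /* raw read is fine under the lock */
		i_size_write(inode, pos);
	spin_unlock(&fc->lock);
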
index 69c7750d55b8e59cfd95a0322f45557a74c0fdfe..91edb8932d905890a342c1f66cbfe5dbdd04f828 100644 (file)
@@ -239,6 +239,9 @@ struct fuse_conn {
        /** Lock protecting accesses to members of this structure */
        spinlock_t lock;
 
+       /** Mutex protecting against directory alias creation */
+       struct mutex inst_mutex;
+
        /** Refcount */
        atomic_t count;
 
index 7d0a9aee01f248ef70bb1796a013fca8dda571af..fc42035703702813266c14c66d784ee1ec7e1c20 100644 (file)
@@ -109,6 +109,7 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
 
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
 {
+       struct fuse_conn *fc = get_fuse_conn(inode);
        if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size)
                invalidate_inode_pages(inode->i_mapping);
 
@@ -117,7 +118,9 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
        inode->i_nlink   = attr->nlink;
        inode->i_uid     = attr->uid;
        inode->i_gid     = attr->gid;
+       spin_lock(&fc->lock);
        i_size_write(inode, attr->size);
+       spin_unlock(&fc->lock);
        inode->i_blocks  = attr->blocks;
        inode->i_atime.tv_sec   = attr->atime;
        inode->i_atime.tv_nsec  = attr->atimensec;
@@ -130,7 +133,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
 static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
 {
        inode->i_mode = attr->mode & S_IFMT;
-       i_size_write(inode, attr->size);
+       inode->i_size = attr->size;
        if (S_ISREG(inode->i_mode)) {
                fuse_init_common(inode);
                fuse_init_file_inode(inode);
@@ -169,7 +172,6 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
        struct inode *inode;
        struct fuse_inode *fi;
        struct fuse_conn *fc = get_fuse_conn_super(sb);
-       int retried = 0;
 
  retry:
        inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
@@ -183,16 +185,16 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
                fuse_init_inode(inode, attr);
                unlock_new_inode(inode);
        } else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
-               BUG_ON(retried);
                /* Inode has changed type, any I/O on the old should fail */
                make_bad_inode(inode);
                iput(inode);
-               retried = 1;
                goto retry;
        }
 
        fi = get_fuse_inode(inode);
+       spin_lock(&fc->lock);
        fi->nlookup ++;
+       spin_unlock(&fc->lock);
        fuse_change_attributes(inode, attr);
        return inode;
 }
@@ -377,6 +379,7 @@ static struct fuse_conn *new_conn(void)
        fc = kzalloc(sizeof(*fc), GFP_KERNEL);
        if (fc) {
                spin_lock_init(&fc->lock);
+               mutex_init(&fc->inst_mutex);
                atomic_set(&fc->count, 1);
                init_waitqueue_head(&fc->waitq);
                init_waitqueue_head(&fc->blocked_waitq);
@@ -396,8 +399,10 @@ static struct fuse_conn *new_conn(void)
 
 void fuse_conn_put(struct fuse_conn *fc)
 {
-       if (atomic_dec_and_test(&fc->count))
+       if (atomic_dec_and_test(&fc->count)) {
+               mutex_destroy(&fc->inst_mutex);
                kfree(fc);
+       }
 }
 
 struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
index 1f94dd35a9435562fa7e166320f58b4c005d1091..cdd1694e889bac2c866f3fdc328acc16b47d169f 100644 (file)
@@ -45,7 +45,7 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
        strncpy(buf, table_name, 256);
        buf[255] = '\0';
 
-       p = strstr(buf, ":");
+       p = strchr(buf, ':');
        if (!p) {
                log_info("invalid table_name \"%s\"", table_name);
                kfree(ls);
index 554fe5bd1b728fcf402836570f6571e54c9dc501..72eec6542d6a74adb1eff38ae850d7882e772a8c 100644 (file)
@@ -569,16 +569,15 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
        else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
                log_write_header(sdp, 0, PULL);
        lops_after_commit(sdp, ai);
-       sdp->sd_log_head = sdp->sd_log_flush_head;
 
+       gfs2_log_lock(sdp);
+       sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_free -= sdp->sd_log_num_hdrs;
-
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_buf = 0;
        sdp->sd_log_num_hdrs = 0;
        sdp->sd_log_commited_revoke = 0;
 
-       gfs2_log_lock(sdp);
        if (!list_empty(&ai->ai_ail1_list)) {
                list_add(&ai->ai_list, &sdp->sd_ail1_list);
                ai = NULL;
index 881e337b6a70ab8d7b9701d1d4b70380e2e79dba..ab6d1115f95d5fd2cfdb407a7338e0535fc9c487 100644 (file)
@@ -492,7 +492,7 @@ static int gfs2_check_magic(struct buffer_head *bh)
        ptr = kaddr + bh_offset(bh);
        if (*ptr == cpu_to_be32(GFS2_MAGIC))
                rv = 1;
-       kunmap_atomic(page, KM_USER0);
+       kunmap_atomic(kaddr, KM_USER0);
 
        return rv;
 }
@@ -626,7 +626,7 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
                                memcpy(bh->b_data,
                                       kaddr + bh_offset(bd2->bd_bh),
                                       sdp->sd_sb.sb_bsize);
-                               kunmap_atomic(page, KM_USER0);
+                               kunmap_atomic(kaddr, KM_USER0);
                                *(__be32 *)bh->b_data = 0;
                        } else {
                                bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
index 4fb743f4e4a42bae5cc2c585a3ae8122ed8ae983..e0599fed99ce0ff8e5f4a8786a469fdcfb911152 100644 (file)
@@ -162,7 +162,7 @@ static int zero_readpage(struct page *page)
 
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, PAGE_CACHE_SIZE);
-       kunmap_atomic(page, KM_USER0);
+       kunmap_atomic(kaddr, KM_USER0);
 
        SetPageUptodate(page);
 
@@ -195,7 +195,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
-       kunmap_atomic(page, KM_USER0);
+       kunmap_atomic(kaddr, KM_USER0);
 
        brelse(dibh);
 
@@ -370,19 +370,22 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
        loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
        struct gfs2_alloc *al;
+       unsigned int write_len = to - from;
+
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
        error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
        if (error)
                goto out_uninit;
 
-       gfs2_write_calc_reserv(ip, to - from, &data_blocks, &ind_blocks);
+       gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);
 
-       error = gfs2_write_alloc_required(ip, pos, from - to, &alloc_required);
+       error = gfs2_write_alloc_required(ip, pos, write_len, &alloc_required);
        if (error)
                goto out_unlock;
 
 
+       ip->i_alloc.al_requested = 0;
        if (alloc_required) {
                al = gfs2_alloc_get(ip);
 
@@ -482,7 +485,7 @@ static int gfs2_commit_write(struct file *file, struct page *page,
                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
                       kaddr + from, to - from);
-               kunmap_atomic(page, KM_USER0);
+               kunmap_atomic(kaddr, KM_USER0);
 
                SetPageUptodate(page);
 
index 9eedfd12bfff5d60a610e98c50c5f05075d18504..b01e0cfc99b5fc8d38a3b04d059b66fa2c7c4542 100644 (file)
@@ -32,7 +32,7 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd);
 struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
 static inline void gfs2_alloc_put(struct gfs2_inode *ip)
 {
-       return; /* Se we can see where ip->i_alloc is used */
+       return; /* So we can see where ip->i_alloc is used */
 }
 
 int gfs2_inplace_reserve_i(struct gfs2_inode *ip,
index 5e03b2f67b932dcb37d61ed3dbbffb6576141cff..4ee3f006b861940f6d27c2b09711504498d59607 100644 (file)
@@ -293,7 +293,7 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
                if (h_vm_pgoff >= h_pgoff)
                        v_offset = 0;
 
-               unmap_hugepage_range(vma,
+               __unmap_hugepage_range(vma,
                                vma->vm_start + v_offset, vma->vm_end);
        }
 }
index 6dc6721d9e822d159fbb7e68cfc047aafe6c4e34..89e8da112a75e46cfa6aa6bef831266b07edf835 100644 (file)
@@ -150,11 +150,6 @@ int ioprio_best(unsigned short aprio, unsigned short bprio)
        unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
        unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
 
-       if (!ioprio_valid(aprio))
-               return bprio;
-       if (!ioprio_valid(bprio))
-               return aprio;
-
        if (aclass == IOPRIO_CLASS_NONE)
                aclass = IOPRIO_CLASS_BE;
        if (bclass == IOPRIO_CLASS_NONE)
index c518dd8fe60a5c539055b33e7e6b33860daad361..b85c686b60dbc7f58c32e04740dcb7052056544e 100644 (file)
@@ -725,6 +725,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
                        __FUNCTION__);
                kfree(journal);
                journal = NULL;
+               goto out;
        }
        journal->j_dev = bdev;
        journal->j_fs_dev = fs_dev;
@@ -735,7 +736,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
        J_ASSERT(bh != NULL);
        journal->j_sb_buffer = bh;
        journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
+out:
        return journal;
 }
 
diff --git a/fs/jbd2/Makefile b/fs/jbd2/Makefile
new file mode 100644 (file)
index 0000000..802a341
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux journaling routines.
+#
+
+obj-$(CONFIG_JBD2) += jbd2.o
+
+jbd2-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
new file mode 100644 (file)
index 0000000..68039fa
--- /dev/null
@@ -0,0 +1,697 @@
+/*
+ * linux/fs/jbd2/checkpoint.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1999 Red Hat Software --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Checkpoint routines for the generic filesystem journaling code.
+ * Part of the ext2fs journaling system.
+ *
+ * Checkpointing is the process of ensuring that a section of the log is
+ * committed fully to disk, so that that portion of the log can be
+ * reused.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+/*
+ * Unlink a buffer from a transaction checkpoint list.
+ *
+ * Called with j_list_lock held.
+ */
+static inline void __buffer_unlink_first(struct journal_head *jh)
+{
+       transaction_t *transaction = jh->b_cp_transaction;
+
+       jh->b_cpnext->b_cpprev = jh->b_cpprev;
+       jh->b_cpprev->b_cpnext = jh->b_cpnext;
+       if (transaction->t_checkpoint_list == jh) {
+               transaction->t_checkpoint_list = jh->b_cpnext;
+               if (transaction->t_checkpoint_list == jh)
+                       transaction->t_checkpoint_list = NULL;
+       }
+}
+
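
The checkpoint lists are circular, doubly linked through b_cpnext/b_cpprev,
and the transaction's pointer names the first element; that is why the
unlink above patches the neighbours, then fixes up the head, then detects
the now-empty case. A minimal userspace model of the same operation:

	#include <assert.h>
	#include <stddef.h>

	struct node { struct node *next, *prev; };

	/* Model of __buffer_unlink_first(): *list points at the first
	 * element of a circular list, or is NULL when empty. */
	static void unlink_first(struct node **list, struct node *n)
	{
		n->next->prev = n->prev;
		n->prev->next = n->next;
		if (*list == n) {
			*list = n->next;
			if (*list == n)	/* n was the only element */
				*list = NULL;
		}
	}

	int main(void)
	{
		struct node a, b;
		struct node *list = &a;

		a.next = a.prev = &b;	/* two elements: a <-> b */
		b.next = b.prev = &a;

		unlink_first(&list, &a);
		assert(list == &b && b.next == &b && b.prev == &b);
		unlink_first(&list, &b);
		assert(list == NULL);
		return 0;
	}
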
+/*
+ * Unlink a buffer from a transaction checkpoint(io) list.
+ *
+ * Called with j_list_lock held.
+ */
+static inline void __buffer_unlink(struct journal_head *jh)
+{
+       transaction_t *transaction = jh->b_cp_transaction;
+
+       __buffer_unlink_first(jh);
+       if (transaction->t_checkpoint_io_list == jh) {
+               transaction->t_checkpoint_io_list = jh->b_cpnext;
+               if (transaction->t_checkpoint_io_list == jh)
+                       transaction->t_checkpoint_io_list = NULL;
+       }
+}
+
+/*
+ * Move a buffer from the checkpoint list to the checkpoint io list
+ *
+ * Called with j_list_lock held
+ */
+static inline void __buffer_relink_io(struct journal_head *jh)
+{
+       transaction_t *transaction = jh->b_cp_transaction;
+
+       __buffer_unlink_first(jh);
+
+       if (!transaction->t_checkpoint_io_list) {
+               jh->b_cpnext = jh->b_cpprev = jh;
+       } else {
+               jh->b_cpnext = transaction->t_checkpoint_io_list;
+               jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
+               jh->b_cpprev->b_cpnext = jh;
+               jh->b_cpnext->b_cpprev = jh;
+       }
+       transaction->t_checkpoint_io_list = jh;
+}
+
+/*
+ * Try to release a checkpointed buffer from its transaction.
+ * Returns 1 if we released it and 2 if we also released the
+ * whole transaction.
+ *
+ * Requires j_list_lock
+ * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
+ */
+static int __try_to_free_cp_buf(struct journal_head *jh)
+{
+       int ret = 0;
+       struct buffer_head *bh = jh2bh(jh);
+
+       if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) {
+               JBUFFER_TRACE(jh, "remove from checkpoint list");
+               ret = __jbd2_journal_remove_checkpoint(jh) + 1;
+               jbd_unlock_bh_state(bh);
+               jbd2_journal_remove_journal_head(bh);
+               BUFFER_TRACE(bh, "release");
+               __brelse(bh);
+       } else {
+               jbd_unlock_bh_state(bh);
+       }
+       return ret;
+}
+
+/*
+ * __jbd2_log_wait_for_space: wait until there is space in the journal.
+ *
+ * Called under j_state_lock *only*.  It will be unlocked if we have to wait
+ * for a checkpoint to free up some space in the log.
+ */
+void __jbd2_log_wait_for_space(journal_t *journal)
+{
+       int nblocks;
+       assert_spin_locked(&journal->j_state_lock);
+
+       nblocks = jbd_space_needed(journal);
+       while (__jbd2_log_space_left(journal) < nblocks) {
+               if (journal->j_flags & JBD2_ABORT)
+                       return;
+               spin_unlock(&journal->j_state_lock);
+               mutex_lock(&journal->j_checkpoint_mutex);
+
+               /*
+                * Test again, another process may have checkpointed while we
+                * were waiting for the checkpoint lock
+                */
+               spin_lock(&journal->j_state_lock);
+               nblocks = jbd_space_needed(journal);
+               if (__jbd2_log_space_left(journal) < nblocks) {
+                       spin_unlock(&journal->j_state_lock);
+                       jbd2_log_do_checkpoint(journal);
+                       spin_lock(&journal->j_state_lock);
+               }
+               mutex_unlock(&journal->j_checkpoint_mutex);
+       }
+}
+
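
The loop above is a re-check-after-blocking pattern: the spinlock guards the
space calculation, the mutex serializes the slow path, and the condition
must be re-evaluated once both are reacquired, because another task may
already have checkpointed. The shape, stripped of journal specifics:

	/* Hedged outline of __jbd2_log_wait_for_space():
	 *
	 *	(caller already holds the state spinlock)
	 *	while (!enough_space()) {
	 *		spin_unlock(&state_lock);
	 *		mutex_lock(&checkpoint_mutex);	 (may sleep)
	 *		spin_lock(&state_lock);
	 *		if (!enough_space()) {		 (re-check!)
	 *			spin_unlock(&state_lock);
	 *			do_checkpoint();
	 *			spin_lock(&state_lock);
	 *		}
	 *		mutex_unlock(&checkpoint_mutex);
	 *	}
	 */
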
+/*
+ * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
+ * The caller must restart a list walk.  Wait for someone else to run
+ * jbd_unlock_bh_state().
+ */
+static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
+       __releases(journal->j_list_lock)
+{
+       get_bh(bh);
+       spin_unlock(&journal->j_list_lock);
+       jbd_lock_bh_state(bh);
+       jbd_unlock_bh_state(bh);
+       put_bh(bh);
+}
+
+/*
+ * Clean up transaction's list of buffers submitted for io.
+ * We wait for any pending IO to complete and remove any clean
+ * buffers. Note that we take the buffers in the opposite ordering
+ * from the one in which they were submitted for IO.
+ *
+ * Called with j_list_lock held.
+ */
+static void __wait_cp_io(journal_t *journal, transaction_t *transaction)
+{
+       struct journal_head *jh;
+       struct buffer_head *bh;
+       tid_t this_tid;
+       int released = 0;
+
+       this_tid = transaction->t_tid;
+restart:
+       /* Did somebody clean up the transaction in the meantime? */
+       if (journal->j_checkpoint_transactions != transaction ||
+                       transaction->t_tid != this_tid)
+               return;
+       while (!released && transaction->t_checkpoint_io_list) {
+               jh = transaction->t_checkpoint_io_list;
+               bh = jh2bh(jh);
+               if (!jbd_trylock_bh_state(bh)) {
+                       jbd_sync_bh(journal, bh);
+                       spin_lock(&journal->j_list_lock);
+                       goto restart;
+               }
+               if (buffer_locked(bh)) {
+                       atomic_inc(&bh->b_count);
+                       spin_unlock(&journal->j_list_lock);
+                       jbd_unlock_bh_state(bh);
+                       wait_on_buffer(bh);
+                       /* the journal_head may have gone by now */
+                       BUFFER_TRACE(bh, "brelse");
+                       __brelse(bh);
+                       spin_lock(&journal->j_list_lock);
+                       goto restart;
+               }
+               /*
+                * Now in whatever state the buffer currently is, we know that
+                * it has been written out and so we can drop it from the list
+                */
+               released = __jbd2_journal_remove_checkpoint(jh);
+               jbd_unlock_bh_state(bh);
+               jbd2_journal_remove_journal_head(bh);
+               __brelse(bh);
+       }
+}
+
+#define NR_BATCH       64
+
+static void
+__flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
+{
+       int i;
+
+       ll_rw_block(SWRITE, *batch_count, bhs);
+       for (i = 0; i < *batch_count; i++) {
+               struct buffer_head *bh = bhs[i];
+               clear_buffer_jwrite(bh);
+               BUFFER_TRACE(bh, "brelse");
+               __brelse(bh);
+       }
+       *batch_count = 0;
+}
+
+/*
+ * Try to flush one buffer from the checkpoint list to disk.
+ *
+ * Return 1 if something happened which requires us to abort the current
+ * scan of the checkpoint list.
+ *
+ * Called with j_list_lock held and drops it if 1 is returned
+ * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
+ */
+static int __process_buffer(journal_t *journal, struct journal_head *jh,
+                       struct buffer_head **bhs, int *batch_count)
+{
+       struct buffer_head *bh = jh2bh(jh);
+       int ret = 0;
+
+       if (buffer_locked(bh)) {
+               atomic_inc(&bh->b_count);
+               spin_unlock(&journal->j_list_lock);
+               jbd_unlock_bh_state(bh);
+               wait_on_buffer(bh);
+               /* the journal_head may have gone by now */
+               BUFFER_TRACE(bh, "brelse");
+               __brelse(bh);
+               ret = 1;
+       } else if (jh->b_transaction != NULL) {
+               transaction_t *t = jh->b_transaction;
+               tid_t tid = t->t_tid;
+
+               spin_unlock(&journal->j_list_lock);
+               jbd_unlock_bh_state(bh);
+               jbd2_log_start_commit(journal, tid);
+               jbd2_log_wait_commit(journal, tid);
+               ret = 1;
+       } else if (!buffer_dirty(bh)) {
+               J_ASSERT_JH(jh, !buffer_jbddirty(bh));
+               BUFFER_TRACE(bh, "remove from checkpoint");
+               __jbd2_journal_remove_checkpoint(jh);
+               spin_unlock(&journal->j_list_lock);
+               jbd_unlock_bh_state(bh);
+               jbd2_journal_remove_journal_head(bh);
+               __brelse(bh);
+               ret = 1;
+       } else {
+               /*
+                * Important: we are about to write the buffer, and
+                * possibly block, while still holding the journal lock.
+                * We cannot afford to let the transaction logic start
+                * messing around with this buffer before we write it to
+                * disk, as that would break recoverability.
+                */
+               BUFFER_TRACE(bh, "queue");
+               get_bh(bh);
+               J_ASSERT_BH(bh, !buffer_jwrite(bh));
+               set_buffer_jwrite(bh);
+               bhs[*batch_count] = bh;
+               __buffer_relink_io(jh);
+               jbd_unlock_bh_state(bh);
+               (*batch_count)++;
+               if (*batch_count == NR_BATCH) {
+                       spin_unlock(&journal->j_list_lock);
+                       __flush_batch(journal, bhs, batch_count);
+                       ret = 1;
+               }
+       }
+       return ret;
+}
+
+/*
+ * Perform an actual checkpoint. We take the first transaction on the
+ * list of transactions to be checkpointed and send all its buffers
+ * to disk. We submit larger chunks of data at once.
+ *
+ * The journal should be locked before calling this function.
+ */
+int jbd2_log_do_checkpoint(journal_t *journal)
+{
+       transaction_t *transaction;
+       tid_t this_tid;
+       int result;
+
+       jbd_debug(1, "Start checkpoint\n");
+
+       /*
+        * First thing: if there are any transactions in the log which
+        * don't need checkpointing, just eliminate them from the
+        * journal straight away.
+        */
+       result = jbd2_cleanup_journal_tail(journal);
+       jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
+       if (result <= 0)
+               return result;
+
+       /*
+        * OK, we need to start writing disk blocks.  Take one transaction
+        * and write it.
+        */
+       spin_lock(&journal->j_list_lock);
+       if (!journal->j_checkpoint_transactions)
+               goto out;
+       transaction = journal->j_checkpoint_transactions;
+       this_tid = transaction->t_tid;
+restart:
+       /*
+        * If someone cleaned up this transaction while we slept, we're
+        * done (maybe it's a new transaction, but it fell at the same
+        * address).
+        */
+       if (journal->j_checkpoint_transactions == transaction &&
+                       transaction->t_tid == this_tid) {
+               int batch_count = 0;
+               struct buffer_head *bhs[NR_BATCH];
+               struct journal_head *jh;
+               int retry = 0;
+
+               while (!retry && transaction->t_checkpoint_list) {
+                       struct buffer_head *bh;
+
+                       jh = transaction->t_checkpoint_list;
+                       bh = jh2bh(jh);
+                       if (!jbd_trylock_bh_state(bh)) {
+                               jbd_sync_bh(journal, bh);
+                               retry = 1;
+                               break;
+                       }
+                       retry = __process_buffer(journal, jh, bhs,
+                                                &batch_count);
+                       if (!retry &&
+                           lock_need_resched(&journal->j_list_lock)) {
+                               spin_unlock(&journal->j_list_lock);
+                               retry = 1;
+                               break;
+                       }
+               }
+
+               if (batch_count) {
+                       if (!retry) {
+                               spin_unlock(&journal->j_list_lock);
+                               retry = 1;
+                       }
+                       __flush_batch(journal, bhs, &batch_count);
+               }
+
+               if (retry) {
+                       spin_lock(&journal->j_list_lock);
+                       goto restart;
+               }
+               /*
+                * Now we have cleaned up the first transaction's checkpoint
+                * list. Now wait for the IO on its checkpoint io list.
+                */
+               __wait_cp_io(journal, transaction);
+       }
+out:
+       spin_unlock(&journal->j_list_lock);
+       result = jbd2_cleanup_journal_tail(journal);
+       if (result < 0)
+               return result;
+       return 0;
+}
+
+/*
+ * Check the list of checkpoint transactions for the journal to see if
+ * we have already got rid of any since the last update of the log tail
+ * in the journal superblock.  If so, we can instantly roll the
+ * superblock forward to remove those transactions from the log.
+ *
+ * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
+ *
+ * Called with the journal lock held.
+ *
+ * This is the only part of the journaling code which really needs to be
+ * aware of transaction aborts.  Checkpointing involves writing to the
+ * main filesystem area rather than to the journal, so it can proceed
+ * even in abort state, but we must not update the journal superblock if
+ * we have an abort error outstanding.
+ */
+
+int jbd2_cleanup_journal_tail(journal_t *journal)
+{
+       transaction_t * transaction;
+       tid_t           first_tid;
+       unsigned long   blocknr, freed;
+
+       /* OK, work out the oldest transaction remaining in the log, and
+        * the log block it starts at.
+        *
+        * If the log is now empty, we need to work out which is the
+        * next transaction ID we will write, and where it will
+        * start. */
+
+       spin_lock(&journal->j_state_lock);
+       spin_lock(&journal->j_list_lock);
+       transaction = journal->j_checkpoint_transactions;
+       if (transaction) {
+               first_tid = transaction->t_tid;
+               blocknr = transaction->t_log_start;
+       } else if ((transaction = journal->j_committing_transaction) != NULL) {
+               first_tid = transaction->t_tid;
+               blocknr = transaction->t_log_start;
+       } else if ((transaction = journal->j_running_transaction) != NULL) {
+               first_tid = transaction->t_tid;
+               blocknr = journal->j_head;
+       } else {
+               first_tid = journal->j_transaction_sequence;
+               blocknr = journal->j_head;
+       }
+       spin_unlock(&journal->j_list_lock);
+       J_ASSERT(blocknr != 0);
+
+       /* If the oldest pinned transaction is at the tail of the log
+           already, then there's not much we can do right now. */
+       if (journal->j_tail_sequence == first_tid) {
+               spin_unlock(&journal->j_state_lock);
+               return 1;
+       }
+
+       /* OK, update the superblock to recover the freed space.
+        * Physical blocks come first: have we wrapped beyond the end of
+        * the log?  */
+       freed = blocknr - journal->j_tail;
+       if (blocknr < journal->j_tail)
+               freed = freed + journal->j_last - journal->j_first;
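+       /*
+        * e.g. with (hypothetical) j_first == 1, j_last == 1001,
+        * j_tail == 900 and blocknr == 100 the tail has wrapped, so
+        * freed == (100 - 900) + (1001 - 1) == 200 blocks.
+        */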
+
+       jbd_debug(1,
+                 "Cleaning journal tail from %d to %d (offset %lu), "
+                 "freeing %lu\n",
+                 journal->j_tail_sequence, first_tid, blocknr, freed);
+
+       journal->j_free += freed;
+       journal->j_tail_sequence = first_tid;
+       journal->j_tail = blocknr;
+       spin_unlock(&journal->j_state_lock);
+       if (!(journal->j_flags & JBD2_ABORT))
+               jbd2_journal_update_superblock(journal, 1);
+       return 0;
+}
+
+
+/* Checkpoint list management */
+
+/*
+ * journal_clean_one_cp_list
+ *
+ * Find all the written-back checkpoint buffers in the given list and release them.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns number of buffers reaped (for debug)
+ */
+
+static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+{
+       struct journal_head *last_jh;
+       struct journal_head *next_jh = jh;
+       int ret, freed = 0;
+
+       *released = 0;
+       if (!jh)
+               return 0;
+
+       last_jh = jh->b_cpprev;
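+       /* Walk the circular list exactly once, ending at the old tail */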
+       do {
+               jh = next_jh;
+               next_jh = jh->b_cpnext;
+               /* Use trylock because of the lock ranking */
+               if (jbd_trylock_bh_state(jh2bh(jh))) {
+                       ret = __try_to_free_cp_buf(jh);
+                       if (ret) {
+                               freed++;
+                               if (ret == 2) {
+                                       *released = 1;
+                                       return freed;
+                               }
+                       }
+               }
+               /*
+                * This function only frees up some memory
+                * if possible, so we don't have an obligation
+                * to finish processing. Bail out if preemption
+                * is requested:
+                */
+               if (need_resched())
+                       return freed;
+       } while (jh != last_jh);
+
+       return freed;
+}
+
+/*
+ * journal_clean_checkpoint_list
+ *
+ * Find all the written-back checkpoint buffers in the journal and release them.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ * Returns number of buffers reaped (for debug)
+ */
+
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+{
+       transaction_t *transaction, *last_transaction, *next_transaction;
+       int ret = 0;
+       int released;
+
+       transaction = journal->j_checkpoint_transactions;
+       if (!transaction)
+               goto out;
+
+       last_transaction = transaction->t_cpprev;
+       next_transaction = transaction;
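+       /* Visit each transaction on the circular checkpoint list once */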
+       do {
+               transaction = next_transaction;
+               next_transaction = transaction->t_cpnext;
+               ret += journal_clean_one_cp_list(transaction->
+                               t_checkpoint_list, &released);
+               /*
+                * This function only frees up some memory if possible, so we
+                * don't have an obligation to finish processing. Bail out if
+                * preemption is requested:
+                */
+               if (need_resched())
+                       goto out;
+               if (released)
+                       continue;
+               /*
+                * Be just as careful removing buffers from this list as from
+                * t_checkpoint_list: we may see buffers on the io list whose
+                * IO has not yet been submitted.
+                */
+               ret += journal_clean_one_cp_list(transaction->
+                               t_checkpoint_io_list, &released);
+               if (need_resched())
+                       goto out;
+       } while (transaction != last_transaction);
+out:
+       return ret;
+}
+
+/*
+ * journal_remove_checkpoint: called after a buffer has been committed
+ * to disk (either by being write-back flushed to disk, or being
+ * committed to the log).
+ *
+ * We cannot safely clean a transaction out of the log until all of the
+ * buffer updates committed in that transaction have safely been stored
+ * elsewhere on disk.  To achieve this, all of the buffers in a
+ * transaction need to be maintained on the transaction's checkpoint
+ * lists until they have been rewritten, at which point this function is
+ * called to remove the buffer from the existing transaction's
+ * checkpoint lists.
+ *
+ * The function returns 1 if it frees the transaction, 0 otherwise.
+ *
+ * This function is called with the journal locked.
+ * This function is called with j_list_lock held.
+ * This function is called with jbd_lock_bh_state(jh2bh(jh))
+ */
+
+int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
+{
+       transaction_t *transaction;
+       journal_t *journal;
+       int ret = 0;
+
+       JBUFFER_TRACE(jh, "entry");
+
+       if ((transaction = jh->b_cp_transaction) == NULL) {
+               JBUFFER_TRACE(jh, "not on transaction");
+               goto out;
+       }
+       journal = transaction->t_journal;
+
+       __buffer_unlink(jh);
+       jh->b_cp_transaction = NULL;
+
+       if (transaction->t_checkpoint_list != NULL ||
+           transaction->t_checkpoint_io_list != NULL)
+               goto out;
+       JBUFFER_TRACE(jh, "transaction has no more buffers");
+
+       /*
+        * There is one special case to worry about: if we have just pulled the
+        * buffer off a committing transaction's forget list, then even if the
+        * checkpoint list is empty, the transaction obviously cannot be
+        * dropped!
+        *
+        * The locking here around j_committing_transaction is a bit sleazy.
+        * See the comment at the end of jbd2_journal_commit_transaction().
+        */
+       if (transaction == journal->j_committing_transaction) {
+               JBUFFER_TRACE(jh, "belongs to committing transaction");
+               goto out;
+       }
+
+       /* OK, that was the last buffer for the transaction: we can now
+          safely remove this transaction from the log */
+
+       __jbd2_journal_drop_transaction(journal, transaction);
+
+       /* Just in case anybody was waiting for more transactions to be
+           checkpointed... */
+       wake_up(&journal->j_wait_logspace);
+       ret = 1;
+out:
+       JBUFFER_TRACE(jh, "exit");
+       return ret;
+}
+
+/*
+ * journal_insert_checkpoint: put a committed buffer onto a checkpoint
+ * list so that we know when it is safe to clean the transaction out of
+ * the log.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ */
+void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
+                              transaction_t *transaction)
+{
+       JBUFFER_TRACE(jh, "entry");
+       J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
+       J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
+
+       jh->b_cp_transaction = transaction;
+
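+       /*
+        * Splice jh in at the head of the transaction's circular
+        * checkpoint list, creating the list if it was empty.
+        */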
+       if (!transaction->t_checkpoint_list) {
+               jh->b_cpnext = jh->b_cpprev = jh;
+       } else {
+               jh->b_cpnext = transaction->t_checkpoint_list;
+               jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
+               jh->b_cpprev->b_cpnext = jh;
+               jh->b_cpnext->b_cpprev = jh;
+       }
+       transaction->t_checkpoint_list = jh;
+}
+
+/*
+ * We've finished with this transaction structure: adios...
+ *
+ * The transaction must have no links except for the checkpoint by this
+ * point.
+ *
+ * Called with the journal locked.
+ * Called with j_list_lock held.
+ */
+
+void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
+{
+       assert_spin_locked(&journal->j_list_lock);
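+       /*
+        * Unlink the transaction from the circular checkpoint list; if
+        * it was the only element there, the list head becomes NULL.
+        */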
+       if (transaction->t_cpnext) {
+               transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
+               transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
+               if (journal->j_checkpoint_transactions == transaction)
+                       journal->j_checkpoint_transactions =
+                               transaction->t_cpnext;
+               if (journal->j_checkpoint_transactions == transaction)
+                       journal->j_checkpoint_transactions = NULL;
+       }
+
+       J_ASSERT(transaction->t_state == T_FINISHED);
+       J_ASSERT(transaction->t_buffers == NULL);
+       J_ASSERT(transaction->t_sync_datalist == NULL);
+       J_ASSERT(transaction->t_forget == NULL);
+       J_ASSERT(transaction->t_iobuf_list == NULL);
+       J_ASSERT(transaction->t_shadow_list == NULL);
+       J_ASSERT(transaction->t_log_list == NULL);
+       J_ASSERT(transaction->t_checkpoint_list == NULL);
+       J_ASSERT(transaction->t_checkpoint_io_list == NULL);
+       J_ASSERT(transaction->t_updates == 0);
+       J_ASSERT(journal->j_committing_transaction != transaction);
+       J_ASSERT(journal->j_running_transaction != transaction);
+
+       jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
+       kfree(transaction);
+}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
new file mode 100644 (file)
index 0000000..70b2ae1
--- /dev/null
@@ -0,0 +1,920 @@
+/*
+ * linux/fs/jbd2/commit.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
+ *
+ * Copyright 1998 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Journal commit routines for the generic filesystem journaling code;
+ * part of the ext2fs journaling system.
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/smp_lock.h>
+
+/*
+ * Default IO end handler for temporary BJ_IO buffer_heads.
+ */
+static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
+{
+       BUFFER_TRACE(bh, "");
+       if (uptodate)
+               set_buffer_uptodate(bh);
+       else
+               clear_buffer_uptodate(bh);
+       unlock_buffer(bh);
+}
+
+/*
+ * When an ext3-ordered file is truncated, it is possible that many pages are
+ * not successfully freed, because they are attached to a committing transaction.
+ * After the transaction commits, these pages are left on the LRU, with no
+ * ->mapping, and with attached buffers.  These pages are trivially reclaimable
+ * by the VM, but their apparent absence upsets the VM accounting, and it makes
+ * the numbers in /proc/meminfo look odd.
+ *
+ * So here, we have a buffer which has just come off the forget list.  Look to
+ * see if we can strip all buffers from the backing page.
+ *
+ * Called under lock_journal(), and possibly under journal_datalist_lock.  The
+ * caller provided us with a ref against the buffer, and we drop that here.
+ */
+static void release_buffer_page(struct buffer_head *bh)
+{
+       struct page *page;
+
+       if (buffer_dirty(bh))
+               goto nope;
+       if (atomic_read(&bh->b_count) != 1)
+               goto nope;
+       page = bh->b_page;
+       if (!page)
+               goto nope;
+       if (page->mapping)
+               goto nope;
+
+       /* OK, it's a truncated page */
+       if (TestSetPageLocked(page))
+               goto nope;
+
+       page_cache_get(page);
+       __brelse(bh);
+       try_to_free_buffers(page);
+       unlock_page(page);
+       page_cache_release(page);
+       return;
+
+nope:
+       __brelse(bh);
+}
+
+/*
+ * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
+ * held.  For ranking reasons we must trylock.  If we lose, schedule away and
+ * return 0.  j_list_lock is dropped in this case.
+ */
+static int inverted_lock(journal_t *journal, struct buffer_head *bh)
+{
+       if (!jbd_trylock_bh_state(bh)) {
+               spin_unlock(&journal->j_list_lock);
+               schedule();
+               return 0;
+       }
+       return 1;
+}
+
+/* Done it all: now write the commit record.  We should have
+ * cleaned up our previous buffers by now, so if we are in abort
+ * mode we can now just skip the rest of the journal write
+ * entirely.
+ *
+ * Returns 1 if the journal needs to be aborted or 0 on success
+ */
+static int journal_write_commit_record(journal_t *journal,
+                                       transaction_t *commit_transaction)
+{
+       struct journal_head *descriptor;
+       struct buffer_head *bh;
+       int i, ret;
+       int barrier_done = 0;
+
+       if (is_journal_aborted(journal))
+               return 0;
+
+       descriptor = jbd2_journal_get_descriptor_buffer(journal);
+       if (!descriptor)
+               return 1;
+
+       bh = jh2bh(descriptor);
+
+       /* Stamp a commit header into every 512-byte sector of the block */
+       for (i = 0; i < bh->b_size; i += 512) {
+               journal_header_t *tmp = (journal_header_t *)(bh->b_data + i);
+               tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
+               tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
+               tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
+       }
+
+       JBUFFER_TRACE(descriptor, "write commit block");
+       set_buffer_dirty(bh);
+       if (journal->j_flags & JBD2_BARRIER) {
+               set_buffer_ordered(bh);
+               barrier_done = 1;
+       }
+       ret = sync_dirty_buffer(bh);
+       /* is it possible for another commit to fail at roughly
+        * the same time as this one?  If so, we don't want to
+        * trust the barrier flag in the super, but instead want
+        * to remember if we sent a barrier request
+        */
+       if (ret == -EOPNOTSUPP && barrier_done) {
+               char b[BDEVNAME_SIZE];
+
+               printk(KERN_WARNING
+                       "JBD: barrier-based sync failed on %s - "
+                       "disabling barriers\n",
+                       bdevname(journal->j_dev, b));
+               spin_lock(&journal->j_state_lock);
+               journal->j_flags &= ~JBD2_BARRIER;
+               spin_unlock(&journal->j_state_lock);
+
+               /* And try again, without the barrier */
+               clear_buffer_ordered(bh);
+               set_buffer_uptodate(bh);
+               set_buffer_dirty(bh);
+               ret = sync_dirty_buffer(bh);
+       }
+       put_bh(bh);             /* One for getblk() */
+       jbd2_journal_put_journal_head(descriptor);
+
+       return (ret == -EIO);
+}
+
+static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
+{
+       int i;
+
+       for (i = 0; i < bufs; i++) {
+               wbuf[i]->b_end_io = end_buffer_write_sync;
+               /* We use up our safety reference in submit_bh() */
+               submit_bh(WRITE, wbuf[i]);
+       }
+}
+
+/*
+ *  Submit all the data buffers to disk
+ */
+static void journal_submit_data_buffers(journal_t *journal,
+                               transaction_t *commit_transaction)
+{
+       struct journal_head *jh;
+       struct buffer_head *bh;
+       int locked;
+       int bufs = 0;
+       struct buffer_head **wbuf = journal->j_wbuf;
+
+       /*
+        * Whenever we unlock the journal and sleep, things can get added
+        * onto ->t_sync_datalist, so we have to keep looping back to
+        * write_out_data until we *know* that the list is empty.
+        *
+        * Cleanup any flushed data buffers from the data list.  Even in
+        * abort mode, we want to flush this out as soon as possible.
+        */
+write_out_data:
+       cond_resched();
+       spin_lock(&journal->j_list_lock);
+
+       while (commit_transaction->t_sync_datalist) {
+               jh = commit_transaction->t_sync_datalist;
+               bh = jh2bh(jh);
+               locked = 0;
+
+               /* Get reference just to make sure buffer does not disappear
+                * when we are forced to drop various locks */
+               get_bh(bh);
+               /* If the buffer is dirty, we need to submit IO and hence
+                * we need the buffer lock. We try to lock the buffer without
+                * blocking. If we fail, we need to drop j_list_lock and do
+                * blocking lock_buffer().
+                */
+               if (buffer_dirty(bh)) {
+                       if (test_set_buffer_locked(bh)) {
+                               BUFFER_TRACE(bh, "needs blocking lock");
+                               spin_unlock(&journal->j_list_lock);
+                               /* Write out all data to prevent deadlocks */
+                               journal_do_submit_data(wbuf, bufs);
+                               bufs = 0;
+                               lock_buffer(bh);
+                               spin_lock(&journal->j_list_lock);
+                       }
+                       locked = 1;
+               }
+               /* We have to get bh_state lock. Again out of order, sigh. */
+               if (!inverted_lock(journal, bh)) {
+                       jbd_lock_bh_state(bh);
+                       spin_lock(&journal->j_list_lock);
+               }
+               /* Someone already cleaned up the buffer? */
+               if (!buffer_jbd(bh)
+                       || jh->b_transaction != commit_transaction
+                       || jh->b_jlist != BJ_SyncData) {
+                       jbd_unlock_bh_state(bh);
+                       if (locked)
+                               unlock_buffer(bh);
+                       BUFFER_TRACE(bh, "already cleaned up");
+                       put_bh(bh);
+                       continue;
+               }
+               if (locked && test_clear_buffer_dirty(bh)) {
+                       BUFFER_TRACE(bh, "needs writeout, adding to array");
+                       wbuf[bufs++] = bh;
+                       __jbd2_journal_file_buffer(jh, commit_transaction,
+                                               BJ_Locked);
+                       jbd_unlock_bh_state(bh);
+                       if (bufs == journal->j_wbufsize) {
+                               spin_unlock(&journal->j_list_lock);
+                               journal_do_submit_data(wbuf, bufs);
+                               bufs = 0;
+                               goto write_out_data;
+                       }
+               } else {
+                       BUFFER_TRACE(bh, "writeout complete: unfile");
+                       __jbd2_journal_unfile_buffer(jh);
+                       jbd_unlock_bh_state(bh);
+                       if (locked)
+                               unlock_buffer(bh);
+                       jbd2_journal_remove_journal_head(bh);
+                       /* Once for our safety reference, once for
+                        * jbd2_journal_remove_journal_head() */
+                       put_bh(bh);
+                       put_bh(bh);
+               }
+
+               if (lock_need_resched(&journal->j_list_lock)) {
+                       spin_unlock(&journal->j_list_lock);
+                       goto write_out_data;
+               }
+       }
+       spin_unlock(&journal->j_list_lock);
+       journal_do_submit_data(wbuf, bufs);
+}
+
+static inline void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
+                                  unsigned long long block)
+{
+       tag->t_blocknr = cpu_to_be32(block & (u32)~0);
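+       /*
+        * The high half is block >> 32; it is written as two shifts so
+        * the expression stays well-defined even if the block type were
+        * only 32 bits wide.
+        */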
+       if (tag_bytes > JBD_TAG_SIZE32)
+               tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
+}
+
+/*
+ * jbd2_journal_commit_transaction
+ *
+ * The primary function for committing a transaction to the log.  This
+ * function is called by the journal thread to begin a complete commit.
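+ *
+ * The commit proceeds in phases: lock down the running transaction and
+ * wait for its updates to finish, flush the data buffers, write the
+ * metadata and descriptor blocks to the log, wait for that IO, write
+ * the commit record, and finally file any buffers that still need
+ * writeback onto the checkpoint lists.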
+ */
+void jbd2_journal_commit_transaction(journal_t *journal)
+{
+       transaction_t *commit_transaction;
+       struct journal_head *jh, *new_jh, *descriptor;
+       struct buffer_head **wbuf = journal->j_wbuf;
+       int bufs;
+       int flags;
+       int err;
+       unsigned long long blocknr;
+       char *tagp = NULL;
+       journal_header_t *header;
+       journal_block_tag_t *tag = NULL;
+       int space_left = 0;
+       int first_tag = 0;
+       int tag_flag;
+       int i;
+       int tag_bytes = journal_tag_bytes(journal);
+
+       /*
+        * First job: lock down the current transaction and wait for
+        * all outstanding updates to complete.
+        */
+
+#ifdef COMMIT_STATS
+       spin_lock(&journal->j_list_lock);
+       summarise_journal_usage(journal);
+       spin_unlock(&journal->j_list_lock);
+#endif
+
+       /* Do we need to erase the effects of a prior jbd2_journal_flush? */
+       if (journal->j_flags & JBD2_FLUSHED) {
+               jbd_debug(3, "superblock updated\n");
+               jbd2_journal_update_superblock(journal, 1);
+       } else {
+               jbd_debug(3, "superblock not updated\n");
+       }
+
+       J_ASSERT(journal->j_running_transaction != NULL);
+       J_ASSERT(journal->j_committing_transaction == NULL);
+
+       commit_transaction = journal->j_running_transaction;
+       J_ASSERT(commit_transaction->t_state == T_RUNNING);
+
+       jbd_debug(1, "JBD: starting commit of transaction %d\n",
+                       commit_transaction->t_tid);
+
+       spin_lock(&journal->j_state_lock);
+       commit_transaction->t_state = T_LOCKED;
+
+       spin_lock(&commit_transaction->t_handle_lock);
+       while (commit_transaction->t_updates) {
+               DEFINE_WAIT(wait);
+
+               prepare_to_wait(&journal->j_wait_updates, &wait,
+                                       TASK_UNINTERRUPTIBLE);
+               if (commit_transaction->t_updates) {
+                       spin_unlock(&commit_transaction->t_handle_lock);
+                       spin_unlock(&journal->j_state_lock);
+                       schedule();
+                       spin_lock(&journal->j_state_lock);
+                       spin_lock(&commit_transaction->t_handle_lock);
+               }
+               finish_wait(&journal->j_wait_updates, &wait);
+       }
+       spin_unlock(&commit_transaction->t_handle_lock);
+
+       J_ASSERT (commit_transaction->t_outstanding_credits <=
+                       journal->j_max_transaction_buffers);
+
+       /*
+        * First thing we are allowed to do is to discard any remaining
+        * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
+        * that there are no such buffers: if a large filesystem
+        * operation like a truncate needs to split itself over multiple
+        * transactions, then it may try to do a jbd2_journal_restart() while
+        * there are still BJ_Reserved buffers outstanding.  These must
+        * be released cleanly from the current transaction.
+        *
+        * In this case, the filesystem must still reserve write access
+        * again before modifying the buffer in the new transaction, but
+        * we do not require it to remember exactly which old buffers it
+        * has reserved.  This is consistent with the existing behaviour
+        * that multiple jbd2_journal_get_write_access() calls to the same
+        * buffer are perfectly permissible.
+        */
+       while (commit_transaction->t_reserved_list) {
+               jh = commit_transaction->t_reserved_list;
+               JBUFFER_TRACE(jh, "reserved, unused: refile");
+               /*
+                * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
+                * leave undo-committed data.
+                */
+               if (jh->b_committed_data) {
+                       struct buffer_head *bh = jh2bh(jh);
+
+                       jbd_lock_bh_state(bh);
+                       jbd2_slab_free(jh->b_committed_data, bh->b_size);
+                       jh->b_committed_data = NULL;
+                       jbd_unlock_bh_state(bh);
+               }
+               jbd2_journal_refile_buffer(journal, jh);
+       }
+
+       /*
+        * Now try to drop any written-back buffers from the journal's
+        * checkpoint lists.  We do this *before* commit because it potentially
+        * frees some memory
+        */
+       spin_lock(&journal->j_list_lock);
+       __jbd2_journal_clean_checkpoint_list(journal);
+       spin_unlock(&journal->j_list_lock);
+
+       jbd_debug (3, "JBD: commit phase 1\n");
+
+       /*
+        * Switch to a new revoke table.
+        */
+       jbd2_journal_switch_revoke_table(journal);
+
+       commit_transaction->t_state = T_FLUSH;
+       journal->j_committing_transaction = commit_transaction;
+       journal->j_running_transaction = NULL;
+       commit_transaction->t_log_start = journal->j_head;
+       wake_up(&journal->j_wait_transaction_locked);
+       spin_unlock(&journal->j_state_lock);
+
+       jbd_debug (3, "JBD: commit phase 2\n");
+
+       /*
+        * First, drop modified flag: all accesses to the buffers
+        * will be tracked for a new transaction only -bzzz
+        */
+       spin_lock(&journal->j_list_lock);
+       if (commit_transaction->t_buffers) {
+               new_jh = jh = commit_transaction->t_buffers->b_tnext;
+               do {
+                       J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
+                                       new_jh->b_modified == 0);
+                       new_jh->b_modified = 0;
+                       new_jh = new_jh->b_tnext;
+               } while (new_jh != jh);
+       }
+       spin_unlock(&journal->j_list_lock);
+
+       /*
+        * Now start flushing things to disk, in the order they appear
+        * on the transaction lists.  Data blocks go first.
+        */
+       err = 0;
+       journal_submit_data_buffers(journal, commit_transaction);
+
+       /*
+        * Wait for all previously submitted IO to complete.
+        */
+       spin_lock(&journal->j_list_lock);
+       while (commit_transaction->t_locked_list) {
+               struct buffer_head *bh;
+
+               jh = commit_transaction->t_locked_list->b_tprev;
+               bh = jh2bh(jh);
+               get_bh(bh);
+               if (buffer_locked(bh)) {
+                       spin_unlock(&journal->j_list_lock);
+                       wait_on_buffer(bh);
+                       if (unlikely(!buffer_uptodate(bh)))
+                               err = -EIO;
+                       spin_lock(&journal->j_list_lock);
+               }
+               if (!inverted_lock(journal, bh)) {
+                       put_bh(bh);
+                       spin_lock(&journal->j_list_lock);
+                       continue;
+               }
+               if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
+                       __jbd2_journal_unfile_buffer(jh);
+                       jbd_unlock_bh_state(bh);
+                       jbd2_journal_remove_journal_head(bh);
+                       put_bh(bh);
+               } else {
+                       jbd_unlock_bh_state(bh);
+               }
+               put_bh(bh);
+               cond_resched_lock(&journal->j_list_lock);
+       }
+       spin_unlock(&journal->j_list_lock);
+
+       if (err)
+               __jbd2_journal_abort_hard(journal);
+
+       jbd2_journal_write_revoke_records(journal, commit_transaction);
+
+       jbd_debug(3, "JBD: commit phase 2\n");
+
+       /*
+        * If we found any dirty or locked buffers, then we should have
+        * looped back up to the write_out_data label.  If there weren't
+        * any then journal_clean_data_list should have wiped the list
+        * clean by now, so check that it is in fact empty.
+        */
+       J_ASSERT (commit_transaction->t_sync_datalist == NULL);
+
+       jbd_debug (3, "JBD: commit phase 3\n");
+
+       /*
+        * Way to go: we have now written out all of the data for a
+        * transaction!  Now comes the tricky part: we need to write out
+        * metadata.  Loop over the transaction's entire buffer list:
+        */
+       commit_transaction->t_state = T_COMMIT;
+
+       descriptor = NULL;
+       bufs = 0;
+       while (commit_transaction->t_buffers) {
+
+               /* Find the next buffer to be journaled... */
+
+               jh = commit_transaction->t_buffers;
+
+               /* If we're in abort mode, we just un-journal the buffer and
+                  release it for background writing. */
+
+               if (is_journal_aborted(journal)) {
+                       JBUFFER_TRACE(jh, "journal is aborting: refile");
+                       jbd2_journal_refile_buffer(journal, jh);
+                       /* If that was the last one, we need to clean up
+                        * any descriptor buffers which may have been
+                        * already allocated, even if we are now
+                        * aborting. */
+                       if (!commit_transaction->t_buffers)
+                               goto start_journal_io;
+                       continue;
+               }
+
+               /* Make sure we have a descriptor block in which to
+                  record the metadata buffer. */
+
+               if (!descriptor) {
+                       struct buffer_head *bh;
+
+                       J_ASSERT (bufs == 0);
+
+                       jbd_debug(4, "JBD: get descriptor\n");
+
+                       descriptor = jbd2_journal_get_descriptor_buffer(journal);
+                       if (!descriptor) {
+                               __jbd2_journal_abort_hard(journal);
+                               continue;
+                       }
+
+                       bh = jh2bh(descriptor);
+                       jbd_debug(4, "JBD: got buffer %llu (%p)\n",
+                               (unsigned long long)bh->b_blocknr, bh->b_data);
+                       header = (journal_header_t *)&bh->b_data[0];
+                       header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
+                       header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
+                       header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
+
+                       tagp = &bh->b_data[sizeof(journal_header_t)];
+                       space_left = bh->b_size - sizeof(journal_header_t);
+                       first_tag = 1;
+                       set_buffer_jwrite(bh);
+                       set_buffer_dirty(bh);
+                       wbuf[bufs++] = bh;
+
+                       /* Record it so that we can wait for IO
+                           completion later */
+                       BUFFER_TRACE(bh, "ph3: file as descriptor");
+                       jbd2_journal_file_buffer(descriptor, commit_transaction,
+                                       BJ_LogCtl);
+               }
+
+               /* Where is the buffer to be written? */
+
+               err = jbd2_journal_next_log_block(journal, &blocknr);
+               /* If the block mapping failed, just abandon the buffer
+                  and repeat this loop: we'll fall into the
+                  refile-on-abort condition above. */
+               if (err) {
+                       __jbd2_journal_abort_hard(journal);
+                       continue;
+               }
+
+               /*
+                * start_this_handle() uses t_outstanding_credits to determine
+                * the free space in the log, but this counter is changed
+                * by jbd2_journal_next_log_block() also.
+                */
+               commit_transaction->t_outstanding_credits--;
+
+               /* Bump b_count to prevent truncate from stumbling over
+                   the shadowed buffer!  @@@ This can go if we ever get
+                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
+               atomic_inc(&jh2bh(jh)->b_count);
+
+               /* Make a temporary IO buffer with which to write it out
+                   (this will requeue both the metadata buffer and the
+                   temporary IO buffer). new_bh goes on BJ_IO. */
+
+               set_bit(BH_JWrite, &jh2bh(jh)->b_state);
+               /*
+                * akpm: jbd2_journal_write_metadata_buffer() sets
+                * new_bh->b_transaction to commit_transaction.
+                * We need to clean this up before we release new_bh
+                * (which is of type BJ_IO)
+                */
+               JBUFFER_TRACE(jh, "ph3: write metadata");
+               flags = jbd2_journal_write_metadata_buffer(commit_transaction,
+                                                     jh, &new_jh, blocknr);
+               set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
+               wbuf[bufs++] = jh2bh(new_jh);
+
+               /* Record the new block's tag in the current descriptor
+                   buffer */
+
+               tag_flag = 0;
+               if (flags & 1)
+                       tag_flag |= JBD2_FLAG_ESCAPE;
+               if (!first_tag)
+                       tag_flag |= JBD2_FLAG_SAME_UUID;
+
+               tag = (journal_block_tag_t *) tagp;
+               write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
+               tag->t_flags = cpu_to_be32(tag_flag);
+               tagp += tag_bytes;
+               space_left -= tag_bytes;
+
+               if (first_tag) {
+                       memcpy (tagp, journal->j_uuid, 16);
+                       tagp += 16;
+                       space_left -= 16;
+                       first_tag = 0;
+               }
+
+               /* If there's no more to do, or if the descriptor is full,
+                  let the IO rip! */
+
+               if (bufs == journal->j_wbufsize ||
+                   commit_transaction->t_buffers == NULL ||
+                   space_left < tag_bytes + 16) {
+
+                       jbd_debug(4, "JBD: Submit %d IOs\n", bufs);
+
+                       /* Write an end-of-descriptor marker before
+                           submitting the IOs.  "tag" still points to
+                           the last tag we set up. */
+
+                       tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);
+
+start_journal_io:
+                       for (i = 0; i < bufs; i++) {
+                               struct buffer_head *bh = wbuf[i];
+                               lock_buffer(bh);
+                               clear_buffer_dirty(bh);
+                               set_buffer_uptodate(bh);
+                               bh->b_end_io = journal_end_buffer_io_sync;
+                               submit_bh(WRITE, bh);
+                       }
+                       cond_resched();
+
+                       /* Force a new descriptor to be generated next
+                           time round the loop. */
+                       descriptor = NULL;
+                       bufs = 0;
+               }
+       }
+
+       /* Lo and behold: we have just managed to send a transaction to
+           the log.  Before we can commit it, wait for the IO so far to
+           complete.  Control buffers being written are on the
+           transaction's t_log_list queue, and metadata buffers are on
+           the t_iobuf_list queue.
+
+          Wait for the buffers in reverse order.  That way we are
+          less likely to be woken up until all IOs have completed, and
+          so we incur less scheduling load.
+       */
+
+       jbd_debug(3, "JBD: commit phase 4\n");
+
+       /*
+        * akpm: these are BJ_IO, and j_list_lock is not needed.
+        * See __journal_try_to_free_buffer.
+        */
+wait_for_iobuf:
+       while (commit_transaction->t_iobuf_list != NULL) {
+               struct buffer_head *bh;
+
+               jh = commit_transaction->t_iobuf_list->b_tprev;
+               bh = jh2bh(jh);
+               if (buffer_locked(bh)) {
+                       wait_on_buffer(bh);
+                       goto wait_for_iobuf;
+               }
+               if (cond_resched())
+                       goto wait_for_iobuf;
+
+               if (unlikely(!buffer_uptodate(bh)))
+                       err = -EIO;
+
+               clear_buffer_jwrite(bh);
+
+               JBUFFER_TRACE(jh, "ph4: unfile after journal write");
+               jbd2_journal_unfile_buffer(journal, jh);
+
+               /*
+                * ->t_iobuf_list should contain only dummy buffer_heads
+                * which were created by jbd2_journal_write_metadata_buffer().
+                */
+               BUFFER_TRACE(bh, "dumping temporary bh");
+               jbd2_journal_put_journal_head(jh);
+               __brelse(bh);
+               J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
+               free_buffer_head(bh);
+
+               /* We also have to unlock and free the corresponding
+                   shadowed buffer */
+               jh = commit_transaction->t_shadow_list->b_tprev;
+               bh = jh2bh(jh);
+               clear_bit(BH_JWrite, &bh->b_state);
+               J_ASSERT_BH(bh, buffer_jbddirty(bh));
+
+               /* The metadata is now released for reuse, but we need
+                   to remember it against this transaction so that when
+                   we finally commit, we can do any checkpointing
+                   required. */
+               JBUFFER_TRACE(jh, "file as BJ_Forget");
+               jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
+               /* Wake up any transactions which were waiting for this
+                  IO to complete */
+               wake_up_bit(&bh->b_state, BH_Unshadow);
+               JBUFFER_TRACE(jh, "brelse shadowed buffer");
+               __brelse(bh);
+       }
+
+       J_ASSERT (commit_transaction->t_shadow_list == NULL);
+
+       jbd_debug(3, "JBD: commit phase 5\n");
+
+       /* Here we wait for the revoke record and descriptor record buffers */
+ wait_for_ctlbuf:
+       while (commit_transaction->t_log_list != NULL) {
+               struct buffer_head *bh;
+
+               jh = commit_transaction->t_log_list->b_tprev;
+               bh = jh2bh(jh);
+               if (buffer_locked(bh)) {
+                       wait_on_buffer(bh);
+                       goto wait_for_ctlbuf;
+               }
+               if (cond_resched())
+                       goto wait_for_ctlbuf;
+
+               if (unlikely(!buffer_uptodate(bh)))
+                       err = -EIO;
+
+               BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
+               clear_buffer_jwrite(bh);
+               jbd2_journal_unfile_buffer(journal, jh);
+               jbd2_journal_put_journal_head(jh);
+               __brelse(bh);           /* One for getblk */
+               /* AKPM: bforget here */
+       }
+
+       jbd_debug(3, "JBD: commit phase 6\n");
+
+       if (journal_write_commit_record(journal, commit_transaction))
+               err = -EIO;
+
+       if (err)
+               __jbd2_journal_abort_hard(journal);
+
+       /* End of a transaction!  Finally, we can do checkpoint
+           processing: any buffers committed as a result of this
+           transaction can be removed from any checkpoint lists they were
+           on before. */
+
+       jbd_debug(3, "JBD: commit phase 7\n");
+
+       J_ASSERT(commit_transaction->t_sync_datalist == NULL);
+       J_ASSERT(commit_transaction->t_buffers == NULL);
+       J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
+       J_ASSERT(commit_transaction->t_iobuf_list == NULL);
+       J_ASSERT(commit_transaction->t_shadow_list == NULL);
+       J_ASSERT(commit_transaction->t_log_list == NULL);
+
+restart_loop:
+       /*
+        * As there are other places (journal_unmap_buffer()) adding buffers
+        * to this list we have to be careful and hold the j_list_lock.
+        */
+       spin_lock(&journal->j_list_lock);
+       while (commit_transaction->t_forget) {
+               transaction_t *cp_transaction;
+               struct buffer_head *bh;
+
+               jh = commit_transaction->t_forget;
+               spin_unlock(&journal->j_list_lock);
+               bh = jh2bh(jh);
+               jbd_lock_bh_state(bh);
+               J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
+                       jh->b_transaction == journal->j_running_transaction);
+
+               /*
+                * If there is undo-protected committed data against
+                * this buffer, then we can remove it now.  If it is a
+                * buffer needing such protection, the old frozen_data
+                * field now points to a committed version of the
+                * buffer, so rotate that field to the new committed
+                * data.
+                *
+                * Otherwise, we can just throw away the frozen data now.
+                */
+               if (jh->b_committed_data) {
+                       jbd2_slab_free(jh->b_committed_data, bh->b_size);
+                       jh->b_committed_data = NULL;
+                       if (jh->b_frozen_data) {
+                               jh->b_committed_data = jh->b_frozen_data;
+                               jh->b_frozen_data = NULL;
+                       }
+               } else if (jh->b_frozen_data) {
+                       jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+                       jh->b_frozen_data = NULL;
+               }
+
+               spin_lock(&journal->j_list_lock);
+               cp_transaction = jh->b_cp_transaction;
+               if (cp_transaction) {
+                       JBUFFER_TRACE(jh, "remove from old cp transaction");
+                       __jbd2_journal_remove_checkpoint(jh);
+               }
+
+               /* Only re-checkpoint the buffer_head if it is marked
+                * dirty.  If the buffer was added to the BJ_Forget list
+                * by jbd2_journal_forget, it may no longer be dirty and
+                * there's no point in keeping a checkpoint record for
+                * it. */
+
+               /* A buffer which has been freed while still being
+                * journaled by a previous transaction may end up still
+                * being dirty here, but we want to avoid writing back
+                * that buffer in the future now that the last use has
+                * been committed.  That's not only a performance gain,
+                * it also stops aliasing problems if the buffer is left
+                * behind for writeback and gets reallocated for another
+                * use in a different page. */
+               if (buffer_freed(bh)) {
+                       clear_buffer_freed(bh);
+                       clear_buffer_jbddirty(bh);
+               }
+
+               if (buffer_jbddirty(bh)) {
+                       JBUFFER_TRACE(jh, "add to new checkpointing trans");
+                       __jbd2_journal_insert_checkpoint(jh, commit_transaction);
+                       JBUFFER_TRACE(jh, "refile for checkpoint writeback");
+                       __jbd2_journal_refile_buffer(jh);
+                       jbd_unlock_bh_state(bh);
+               } else {
+                       J_ASSERT_BH(bh, !buffer_dirty(bh));
+                       /* A buffer on the BJ_Forget list that is not jbddirty
+                        * has been freed by this transaction, and hence it
+                        * could not have been reallocated until this
+                        * transaction has committed. *BUT* it could be
+                        * reallocated once we have written all the data to
+                        * disk and before we process the buffer on BJ_Forget
+                        * list. */
+                       JBUFFER_TRACE(jh, "refile or unfile freed buffer");
+                       __jbd2_journal_refile_buffer(jh);
+                       if (!jh->b_transaction) {
+                               jbd_unlock_bh_state(bh);
+                                /* needs a brelse */
+                               jbd2_journal_remove_journal_head(bh);
+                               release_buffer_page(bh);
+                       } else
+                               jbd_unlock_bh_state(bh);
+               }
+               cond_resched_lock(&journal->j_list_lock);
+       }
+       spin_unlock(&journal->j_list_lock);
+       /*
+        * This is a bit sleazy.  We borrow j_list_lock to protect
+        * journal->j_committing_transaction in __jbd2_journal_remove_checkpoint.
+        * Really, __jbd2_journal_remove_checkpoint should be using j_state_lock but
+        * it's a bit of a hassle to hold that across __jbd2_journal_remove_checkpoint
+        */
+       spin_lock(&journal->j_state_lock);
+       spin_lock(&journal->j_list_lock);
+       /*
+        * Now recheck if some buffers did not get attached to the transaction
+        * while the lock was dropped...
+        */
+       if (commit_transaction->t_forget) {
+               spin_unlock(&journal->j_list_lock);
+               spin_unlock(&journal->j_state_lock);
+               goto restart_loop;
+       }
+
+       /* Done with this transaction! */
+
+       jbd_debug(3, "JBD: commit phase 8\n");
+
+       J_ASSERT(commit_transaction->t_state == T_COMMIT);
+
+       commit_transaction->t_state = T_FINISHED;
+       J_ASSERT(commit_transaction == journal->j_committing_transaction);
+       journal->j_commit_sequence = commit_transaction->t_tid;
+       journal->j_committing_transaction = NULL;
+       spin_unlock(&journal->j_state_lock);
+
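+       /*
+        * If the transaction still has buffers to be checkpointed, park
+        * it on the journal's circular list of checkpoint transactions;
+        * otherwise it can be dropped right away.
+        */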
+       if (commit_transaction->t_checkpoint_list == NULL) {
+               __jbd2_journal_drop_transaction(journal, commit_transaction);
+       } else {
+               if (journal->j_checkpoint_transactions == NULL) {
+                       journal->j_checkpoint_transactions = commit_transaction;
+                       commit_transaction->t_cpnext = commit_transaction;
+                       commit_transaction->t_cpprev = commit_transaction;
+               } else {
+                       commit_transaction->t_cpnext =
+                               journal->j_checkpoint_transactions;
+                       commit_transaction->t_cpprev =
+                               commit_transaction->t_cpnext->t_cpprev;
+                       commit_transaction->t_cpnext->t_cpprev =
+                               commit_transaction;
+                       commit_transaction->t_cpprev->t_cpnext =
+                               commit_transaction;
+               }
+       }
+       spin_unlock(&journal->j_list_lock);
+
+       jbd_debug(1, "JBD: commit %d complete, head %d\n",
+                 journal->j_commit_sequence, journal->j_tail_sequence);
+
+       wake_up(&journal->j_wait_done_commit);
+}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
new file mode 100644 (file)
index 0000000..c60f378
--- /dev/null
@@ -0,0 +1,2084 @@
+/*
+ * linux/fs/jbd2/journal.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
+ *
+ * Copyright 1998 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Generic filesystem journal-writing code; part of the ext2fs
+ * journaling system.
+ *
+ * This file manages journals: areas of disk reserved for logging
+ * transactional updates.  This includes the kernel journaling thread
+ * which is responsible for scheduling updates to the log.
+ *
+ * We do not actually manage the physical storage of the journal in this
+ * file: that is left to a per-journal policy function, which allows us
+ * to store the journal within a filesystem-specified area for ext2
+ * journaling (ext2 can use a reserved inode for storing the log).
+ */
+
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <linux/pagemap.h>
+#include <linux/kthread.h>
+#include <linux/poison.h>
+#include <linux/proc_fs.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+
+EXPORT_SYMBOL(jbd2_journal_start);
+EXPORT_SYMBOL(jbd2_journal_restart);
+EXPORT_SYMBOL(jbd2_journal_extend);
+EXPORT_SYMBOL(jbd2_journal_stop);
+EXPORT_SYMBOL(jbd2_journal_lock_updates);
+EXPORT_SYMBOL(jbd2_journal_unlock_updates);
+EXPORT_SYMBOL(jbd2_journal_get_write_access);
+EXPORT_SYMBOL(jbd2_journal_get_create_access);
+EXPORT_SYMBOL(jbd2_journal_get_undo_access);
+EXPORT_SYMBOL(jbd2_journal_dirty_data);
+EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
+EXPORT_SYMBOL(jbd2_journal_release_buffer);
+EXPORT_SYMBOL(jbd2_journal_forget);
+#if 0
+EXPORT_SYMBOL(journal_sync_buffer);
+#endif
+EXPORT_SYMBOL(jbd2_journal_flush);
+EXPORT_SYMBOL(jbd2_journal_revoke);
+
+EXPORT_SYMBOL(jbd2_journal_init_dev);
+EXPORT_SYMBOL(jbd2_journal_init_inode);
+EXPORT_SYMBOL(jbd2_journal_update_format);
+EXPORT_SYMBOL(jbd2_journal_check_used_features);
+EXPORT_SYMBOL(jbd2_journal_check_available_features);
+EXPORT_SYMBOL(jbd2_journal_set_features);
+EXPORT_SYMBOL(jbd2_journal_create);
+EXPORT_SYMBOL(jbd2_journal_load);
+EXPORT_SYMBOL(jbd2_journal_destroy);
+EXPORT_SYMBOL(jbd2_journal_update_superblock);
+EXPORT_SYMBOL(jbd2_journal_abort);
+EXPORT_SYMBOL(jbd2_journal_errno);
+EXPORT_SYMBOL(jbd2_journal_ack_err);
+EXPORT_SYMBOL(jbd2_journal_clear_err);
+EXPORT_SYMBOL(jbd2_log_wait_commit);
+EXPORT_SYMBOL(jbd2_journal_start_commit);
+EXPORT_SYMBOL(jbd2_journal_force_commit_nested);
+EXPORT_SYMBOL(jbd2_journal_wipe);
+EXPORT_SYMBOL(jbd2_journal_blocks_per_page);
+EXPORT_SYMBOL(jbd2_journal_invalidatepage);
+EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
+EXPORT_SYMBOL(jbd2_journal_force_commit);
+
+static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
+static void __journal_abort_soft (journal_t *journal, int errno);
+static int jbd2_journal_create_jbd_slab(size_t slab_size);
+
+/*
+ * Helper function used to manage commit timeouts
+ */
+
+static void commit_timeout(unsigned long __data)
+{
+       struct task_struct * p = (struct task_struct *) __data;
+
+       wake_up_process(p);
+}
+
+/*
+ * kjournald2: The main thread function used to manage a logging device
+ * journal.
+ *
+ * This kernel thread is responsible for two things:
+ *
+ * 1) COMMIT:  Every so often we need to commit the current state of the
+ *    filesystem to disk.  The journal thread is responsible for writing
+ *    all of the metadata buffers to disk.
+ *
+ * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
+ *    of the data in that part of the log has been rewritten elsewhere on
+ *    the disk.  Flushing these old buffers to reclaim space in the log is
+ *    known as checkpointing, and this thread is responsible for that job.
+ */
+
+static int kjournald2(void *arg)
+{
+       journal_t *journal = arg;
+       transaction_t *transaction;
+
+       /*
+        * Set up an interval timer which can be used to trigger a commit wakeup
+        * after the commit interval expires
+        */
+       setup_timer(&journal->j_commit_timer, commit_timeout,
+                       (unsigned long)current);
+
+       /* Record that the journal thread is running */
+       journal->j_task = current;
+       wake_up(&journal->j_wait_done_commit);
+
+       printk(KERN_INFO "kjournald2 starting.  Commit interval %ld seconds\n",
+                       journal->j_commit_interval / HZ);
+
+       /*
+        * And now, wait forever for commit wakeup events.
+        */
+       spin_lock(&journal->j_state_lock);
+
+loop:
+       if (journal->j_flags & JBD2_UNMOUNT)
+               goto end_loop;
+
+       jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
+               journal->j_commit_sequence, journal->j_commit_request);
+
+       if (journal->j_commit_sequence != journal->j_commit_request) {
+               jbd_debug(1, "OK, requests differ\n");
+               spin_unlock(&journal->j_state_lock);
+               del_timer_sync(&journal->j_commit_timer);
+               jbd2_journal_commit_transaction(journal);
+               spin_lock(&journal->j_state_lock);
+               goto loop;
+       }
+
+       wake_up(&journal->j_wait_done_commit);
+       if (freezing(current)) {
+               /*
+                * The simpler the better. Flushing the journal isn't a
+                * good idea, because that depends on threads that may
+                * already be stopped.
+                */
+               jbd_debug(1, "Now suspending kjournald2\n");
+               spin_unlock(&journal->j_state_lock);
+               refrigerator();
+               spin_lock(&journal->j_state_lock);
+       } else {
+               /*
+                * We assume on resume that commits are already there,
+                * so we don't sleep
+                */
+               DEFINE_WAIT(wait);
+               int should_sleep = 1;
+
+               prepare_to_wait(&journal->j_wait_commit, &wait,
+                               TASK_INTERRUPTIBLE);
+               if (journal->j_commit_sequence != journal->j_commit_request)
+                       should_sleep = 0;
+               transaction = journal->j_running_transaction;
+               if (transaction && time_after_eq(jiffies,
+                                               transaction->t_expires))
+                       should_sleep = 0;
+               if (journal->j_flags & JBD2_UNMOUNT)
+                       should_sleep = 0;
+               if (should_sleep) {
+                       spin_unlock(&journal->j_state_lock);
+                       schedule();
+                       spin_lock(&journal->j_state_lock);
+               }
+               finish_wait(&journal->j_wait_commit, &wait);
+       }
+
+       jbd_debug(1, "kjournald2 wakes\n");
+
+       /*
+        * Were we woken up by a commit wakeup event?
+        */
+       transaction = journal->j_running_transaction;
+       if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
+               journal->j_commit_request = transaction->t_tid;
+               jbd_debug(1, "woke because of timeout\n");
+       }
+       goto loop;
+
+end_loop:
+       spin_unlock(&journal->j_state_lock);
+       del_timer_sync(&journal->j_commit_timer);
+       journal->j_task = NULL;
+       wake_up(&journal->j_wait_done_commit);
+       jbd_debug(1, "Journal thread exiting.\n");
+       return 0;
+}
+
+static void jbd2_journal_start_thread(journal_t *journal)
+{
+       kthread_run(kjournald2, journal, "kjournald2");
+       wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+}
+
+static void journal_kill_thread(journal_t *journal)
+{
+       spin_lock(&journal->j_state_lock);
+       journal->j_flags |= JBD2_UNMOUNT;
+
+       while (journal->j_task) {
+               wake_up(&journal->j_wait_commit);
+               spin_unlock(&journal->j_state_lock);
+               wait_event(journal->j_wait_done_commit, journal->j_task == 0);
+               spin_lock(&journal->j_state_lock);
+       }
+       spin_unlock(&journal->j_state_lock);
+}
+
+/*
+ * jbd2_journal_write_metadata_buffer: write a metadata buffer to the journal.
+ *
+ * Writes a metadata buffer to a given disk block.  The actual IO is not
+ * performed but a new buffer_head is constructed which labels the data
+ * to be written with the correct destination disk block.
+ *
+ * Any magic-number escaping which needs to be done will cause a
+ * copy-out here.  If the buffer happens to start with the
+ * JBD2_MAGIC_NUMBER, then we can't write it to the log directly: the
+ * magic number is only written to the log for descriptor blocks.  In
+ * this case, we copy the data and replace the first word with 0, and we
+ * return a result code which indicates that this buffer needs to be
+ * marked as an escaped buffer in the corresponding log descriptor
+ * block.  The missing word can then be restored when the block is read
+ * during recovery.
+ *
+ * If the source buffer has already been modified by a new transaction
+ * since we took the last commit snapshot, we use the frozen copy of
+ * that data for IO.  If we end up using the existing buffer_head's data
+ * for the write, then we *have* to lock the buffer to prevent anyone
+ * else from using and possibly modifying it while the IO is in
+ * progress.
+ *
+ * The function returns, in *jh_out, the journal_head to be used for IO.
+ *
+ * We assume that the journal has already been locked in this function.
+ *
+ * Return value:
+ *  <0: Error
+ * >=0: Finished OK
+ *
+ * On success:
+ * Bit 0 set == escape performed on the data
+ * Bit 1 set == buffer copy-out performed (kfree the data after IO)
+ */
+
+int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
+                                 struct journal_head  *jh_in,
+                                 struct journal_head **jh_out,
+                                 unsigned long long blocknr)
+{
+       int need_copy_out = 0;
+       int done_copy_out = 0;
+       int do_escape = 0;
+       char *mapped_data;
+       struct buffer_head *new_bh;
+       struct journal_head *new_jh;
+       struct page *new_page;
+       unsigned int new_offset;
+       struct buffer_head *bh_in = jh2bh(jh_in);
+
+       /*
+        * The buffer really shouldn't be locked: only the current committing
+        * transaction is allowed to write it, so nobody else is allowed
+        * to do any IO.
+        *
+        * akpm: except if we're journalling data, and write() output is
+        * also part of a shared mapping, and another thread has
+        * decided to launch a writepage() against this buffer.
+        */
+       J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
+
+       new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+
+       /*
+        * If a new transaction has already done a buffer copy-out, then
+        * we use that version of the data for the commit.
+        */
+       jbd_lock_bh_state(bh_in);
+repeat:
+       if (jh_in->b_frozen_data) {
+               done_copy_out = 1;
+               new_page = virt_to_page(jh_in->b_frozen_data);
+               new_offset = offset_in_page(jh_in->b_frozen_data);
+       } else {
+               new_page = jh2bh(jh_in)->b_page;
+               new_offset = offset_in_page(jh2bh(jh_in)->b_data);
+       }
+
+       mapped_data = kmap_atomic(new_page, KM_USER0);
+       /*
+        * Check for escaping
+        */
+       if (*((__be32 *)(mapped_data + new_offset)) ==
+                               cpu_to_be32(JBD2_MAGIC_NUMBER)) {
+               need_copy_out = 1;
+               do_escape = 1;
+       }
+       kunmap_atomic(mapped_data, KM_USER0);
+
+       /*
+        * Do we need to do a data copy?
+        */
+       if (need_copy_out && !done_copy_out) {
+               char *tmp;
+
+               jbd_unlock_bh_state(bh_in);
+               tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
+               jbd_lock_bh_state(bh_in);
+               if (jh_in->b_frozen_data) {
+                       jbd2_slab_free(tmp, bh_in->b_size);
+                       goto repeat;
+               }
+
+               jh_in->b_frozen_data = tmp;
+               mapped_data = kmap_atomic(new_page, KM_USER0);
+               memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
+               kunmap_atomic(mapped_data, KM_USER0);
+
+               new_page = virt_to_page(tmp);
+               new_offset = offset_in_page(tmp);
+               done_copy_out = 1;
+       }
+
+       /*
+        * Did we need to escape the data?  Now we've done all the
+        * copying, we can finally do so.
+        */
+       if (do_escape) {
+               mapped_data = kmap_atomic(new_page, KM_USER0);
+               *((unsigned int *)(mapped_data + new_offset)) = 0;
+               kunmap_atomic(mapped_data, KM_USER0);
+       }
+
+       /* keep subsequent assertions sane */
+       new_bh->b_state = 0;
+       init_buffer(new_bh, NULL, NULL);
+       atomic_set(&new_bh->b_count, 1);
+       jbd_unlock_bh_state(bh_in);
+
+       new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
+
+       set_bh_page(new_bh, new_page, new_offset);
+       new_jh->b_transaction = NULL;
+       new_bh->b_size = jh2bh(jh_in)->b_size;
+       new_bh->b_bdev = transaction->t_journal->j_dev;
+       new_bh->b_blocknr = blocknr;
+       set_buffer_mapped(new_bh);
+       set_buffer_dirty(new_bh);
+
+       *jh_out = new_jh;
+
+       /*
+        * The to-be-written buffer needs to get moved to the io queue,
+        * and the original buffer whose contents we are shadowing or
+        * copying is moved to the transaction's shadow queue.
+        */
+       JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
+       jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       JBUFFER_TRACE(new_jh, "file as BJ_IO");
+       jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
+
+       return do_escape | (done_copy_out << 1);
+}
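+
+/*
+ * Caller-side sketch (illustrative, not part of the API): how a caller
+ * can decode the bit-encoded return value documented above.  The
+ * variables are hypothetical stand-ins for the caller's own state.
+ */
+#if 0
+       flags = jbd2_journal_write_metadata_buffer(commit_transaction,
+                                                  jh, &new_jh, blocknr);
+       tag_flag = 0;
+       if (flags & 1)          /* bit 0: the data was escaped */
+               tag_flag |= JBD2_FLAG_ESCAPE;
+       if (flags & 2)          /* bit 1: a copy-out was performed */
+               done_copy_out = 1;
+#endif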
+
+/*
+ * Allocation code for the journal file.  Manage the space left in the
+ * journal, so that we can begin checkpointing when appropriate.
+ */
+
+/*
+ * __jbd2_log_space_left: Return the number of free blocks left in the journal.
+ *
+ * Called with the journal already locked.
+ *
+ * Called under j_state_lock
+ */
+
+int __jbd2_log_space_left(journal_t *journal)
+{
+       int left = journal->j_free;
+
+       assert_spin_locked(&journal->j_state_lock);
+
+       /*
+        * Be pessimistic here about the number of those free blocks which
+        * might be required for log descriptor control blocks.
+        */
+
+#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
+
+       left -= MIN_LOG_RESERVED_BLOCKS;
+
+       if (left <= 0)
+               return 0;
+       left -= (left >> 3);
+       return left;
+}
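+
+/*
+ * Worked example (illustrative): with j_free == 1024, the reservation
+ * above yields 1024 - 32 = 992, and the further one-eighth safety
+ * margin leaves 992 - 124 = 868 blocks reported as available.
+ */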
+
+/*
+ * Called under j_state_lock.  Returns true if a transaction was started.
+ */
+int __jbd2_log_start_commit(journal_t *journal, tid_t target)
+{
+       /*
+        * Are we already doing a recent enough commit?
+        */
+       if (!tid_geq(journal->j_commit_request, target)) {
+               /*
+                * We want a new commit: OK, mark the request and wake up the
+                * commit thread.  We do _not_ do the commit ourselves.
+                */
+
+               journal->j_commit_request = target;
+               jbd_debug(1, "JBD: requesting commit %d/%d\n",
+                         journal->j_commit_request,
+                         journal->j_commit_sequence);
+               wake_up(&journal->j_wait_commit);
+               return 1;
+       }
+       return 0;
+}
+
+int jbd2_log_start_commit(journal_t *journal, tid_t tid)
+{
+       int ret;
+
+       spin_lock(&journal->j_state_lock);
+       ret = __jbd2_log_start_commit(journal, tid);
+       spin_unlock(&journal->j_state_lock);
+       return ret;
+}
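+
+/*
+ * Usage sketch (illustrative): request a commit of the running
+ * transaction and wait for it.  A real caller samples t_tid under
+ * j_state_lock, as jbd2_journal_force_commit_nested() below does.
+ */
+#if 0
+       tid_t tid = journal->j_running_transaction->t_tid;
+
+       if (jbd2_log_start_commit(journal, tid))
+               jbd2_log_wait_commit(journal, tid);
+#endif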
+
+/*
+ * Force and wait upon a commit if the calling process is not within a
+ * transaction.  This is used for forcing out undo-protected data which
+ * contains bitmaps, when the fs is running out of space.
+ *
+ * We can only force the running transaction if we don't have an active handle;
+ * otherwise, we will deadlock.
+ *
+ * Returns true if a transaction was started.
+ */
+int jbd2_journal_force_commit_nested(journal_t *journal)
+{
+       transaction_t *transaction = NULL;
+       tid_t tid;
+
+       spin_lock(&journal->j_state_lock);
+       if (journal->j_running_transaction && !current->journal_info) {
+               transaction = journal->j_running_transaction;
+               __jbd2_log_start_commit(journal, transaction->t_tid);
+       } else if (journal->j_committing_transaction)
+               transaction = journal->j_committing_transaction;
+
+       if (!transaction) {
+               spin_unlock(&journal->j_state_lock);
+               return 0;       /* Nothing to retry */
+       }
+
+       tid = transaction->t_tid;
+       spin_unlock(&journal->j_state_lock);
+       jbd2_log_wait_commit(journal, tid);
+       return 1;
+}
+
+/*
+ * Start a commit of the current running transaction (if any).  Returns true
+ * if a transaction was started, and fills its tid in at *ptid
+ */
+int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
+{
+       int ret = 0;
+
+       spin_lock(&journal->j_state_lock);
+       if (journal->j_running_transaction) {
+               tid_t tid = journal->j_running_transaction->t_tid;
+
+               ret = __jbd2_log_start_commit(journal, tid);
+               if (ret && ptid)
+                       *ptid = tid;
+       } else if (journal->j_committing_transaction && ptid) {
+               /*
+                * If ext3_write_super() recently started a commit, then we
+                * have to wait for completion of that transaction
+                */
+               *ptid = journal->j_committing_transaction->t_tid;
+               ret = 1;
+       }
+       spin_unlock(&journal->j_state_lock);
+       return ret;
+}
+
+/*
+ * Wait for a specified commit to complete.
+ * The caller may not hold the journal lock.
+ */
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
+{
+       int err = 0;
+
+#ifdef CONFIG_JBD_DEBUG
+       spin_lock(&journal->j_state_lock);
+       if (!tid_geq(journal->j_commit_request, tid)) {
+               printk(KERN_EMERG
+                      "%s: error: j_commit_request=%d, tid=%d\n",
+                      __FUNCTION__, journal->j_commit_request, tid);
+       }
+       spin_unlock(&journal->j_state_lock);
+#endif
+       spin_lock(&journal->j_state_lock);
+       while (tid_gt(tid, journal->j_commit_sequence)) {
+               jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
+                                 tid, journal->j_commit_sequence);
+               wake_up(&journal->j_wait_commit);
+               spin_unlock(&journal->j_state_lock);
+               wait_event(journal->j_wait_done_commit,
+                               !tid_gt(tid, journal->j_commit_sequence));
+               spin_lock(&journal->j_state_lock);
+       }
+       spin_unlock(&journal->j_state_lock);
+
+       if (unlikely(is_journal_aborted(journal))) {
+               printk(KERN_EMERG "journal commit I/O error\n");
+               err = -EIO;
+       }
+       return err;
+}
+
+/*
+ * Log buffer allocation routines:
+ */
+
+int jbd2_journal_next_log_block(journal_t *journal, unsigned long long *retp)
+{
+       unsigned long blocknr;
+
+       spin_lock(&journal->j_state_lock);
+       J_ASSERT(journal->j_free > 1);
+
+       blocknr = journal->j_head;
+       journal->j_head++;
+       journal->j_free--;
+       if (journal->j_head == journal->j_last)
+               journal->j_head = journal->j_first;
+       spin_unlock(&journal->j_state_lock);
+       return jbd2_journal_bmap(journal, blocknr, retp);
+}
+
+/*
+ * Conversion of logical to physical block numbers for the journal
+ *
+ * On external journals the journal blocks are identity-mapped, so
+ * this is a no-op.  If needed, we can use j_blk_offset - everything is
+ * ready.
+ */
+int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
+                unsigned long long *retp)
+{
+       int err = 0;
+       unsigned long long ret;
+
+       if (journal->j_inode) {
+               ret = bmap(journal->j_inode, blocknr);
+               if (ret)
+                       *retp = ret;
+               else {
+                       char b[BDEVNAME_SIZE];
+
+                       printk(KERN_ALERT "%s: journal block not found "
+                                       "at offset %lu on %s\n",
+                               __FUNCTION__,
+                               blocknr,
+                               bdevname(journal->j_dev, b));
+                       err = -EIO;
+                       __journal_abort_soft(journal, err);
+               }
+       } else {
+               *retp = blocknr; /* +journal->j_blk_offset */
+       }
+       return err;
+}
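+
+/*
+ * Example (illustrative): for a device-backed journal this mapping is
+ * the identity, so logical block 5 comes back as *retp == 5; for an
+ * inode-backed journal the same lookup goes through bmap() on j_inode.
+ */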
+
+/*
+ * We play buffer_head aliasing tricks to write data/metadata blocks to
+ * the journal without copying their contents, but for journal
+ * descriptor blocks we do need to generate bona fide buffers.
+ *
+ * After the caller of jbd2_journal_get_descriptor_buffer() has finished modifying
+ * the buffer's contents they really should run flush_dcache_page(bh->b_page).
+ * But we don't bother doing that, so there will be coherency problems with
+ * mmaps of blockdevs which hold live JBD-controlled filesystems.
+ */
+struct journal_head *jbd2_journal_get_descriptor_buffer(journal_t *journal)
+{
+       struct buffer_head *bh;
+       unsigned long long blocknr;
+       int err;
+
+       err = jbd2_journal_next_log_block(journal, &blocknr);
+
+       if (err)
+               return NULL;
+
+       bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+       lock_buffer(bh);
+       memset(bh->b_data, 0, journal->j_blocksize);
+       set_buffer_uptodate(bh);
+       unlock_buffer(bh);
+       BUFFER_TRACE(bh, "return this buffer");
+       return jbd2_journal_add_journal_head(bh);
+}
+
+/*
+ * Management for journal control blocks: functions to create and
+ * destroy journal_t structures, and to initialise and read existing
+ * journal blocks from disk.  */
+
+/* First: create and set up a journal_t object in memory.  We initialise
+ * very few fields yet: that has to wait until we have created the
+ * journal structures from scratch, or loaded them from disk. */
+
+static journal_t * journal_init_common (void)
+{
+       journal_t *journal;
+       int err;
+
+       journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
+       if (!journal)
+               goto fail;
+       memset(journal, 0, sizeof(*journal));
+
+       init_waitqueue_head(&journal->j_wait_transaction_locked);
+       init_waitqueue_head(&journal->j_wait_logspace);
+       init_waitqueue_head(&journal->j_wait_done_commit);
+       init_waitqueue_head(&journal->j_wait_checkpoint);
+       init_waitqueue_head(&journal->j_wait_commit);
+       init_waitqueue_head(&journal->j_wait_updates);
+       mutex_init(&journal->j_barrier);
+       mutex_init(&journal->j_checkpoint_mutex);
+       spin_lock_init(&journal->j_revoke_lock);
+       spin_lock_init(&journal->j_list_lock);
+       spin_lock_init(&journal->j_state_lock);
+
+       journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
+
+       /* The journal is marked for error until we succeed with recovery! */
+       journal->j_flags = JBD2_ABORT;
+
+       /* Set up a default-sized revoke table for the new mount. */
+       err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
+       if (err) {
+               kfree(journal);
+               goto fail;
+       }
+       return journal;
+fail:
+       return NULL;
+}
+
+/* jbd2_journal_init_dev and jbd2_journal_init_inode:
+ *
+ * Create a journal structure assigned some fixed set of disk blocks to
+ * the journal.  We don't actually touch those disk blocks yet, but we
+ * need to set up all of the mapping information to tell the journaling
+ * system where the journal blocks are.
+ *
+ */
+
+/**
+ *  journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure
+ *  @bdev: Block device on which to create the journal
+ *  @fs_dev: Device which holds the journalled filesystem for this journal.
+ *  @start: Block nr at which the journal starts.
+ *  @len:  Length of the journal in blocks.
+ *  @blocksize: blocksize of journalling device
+ *  @returns: a newly created journal_t *
+ *
+ *  jbd2_journal_init_dev creates a journal which maps a fixed contiguous
+ *  range of blocks on an arbitrary block device.
+ *
+ */
+journal_t * jbd2_journal_init_dev(struct block_device *bdev,
+                       struct block_device *fs_dev,
+                       unsigned long long start, int len, int blocksize)
+{
+       journal_t *journal = journal_init_common();
+       struct buffer_head *bh;
+       int n;
+
+       if (!journal)
+               return NULL;
+
+       /* journal descriptor can store up to n blocks -bzzz */
+       journal->j_blocksize = blocksize;
+       n = journal->j_blocksize / sizeof(journal_block_tag_t);
+       journal->j_wbufsize = n;
+       journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
+       if (!journal->j_wbuf) {
+               printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
+                       __FUNCTION__);
+               kfree(journal);
+               journal = NULL;
+               goto out;
+       }
+       journal->j_dev = bdev;
+       journal->j_fs_dev = fs_dev;
+       journal->j_blk_offset = start;
+       journal->j_maxlen = len;
+
+       bh = __getblk(journal->j_dev, start, journal->j_blocksize);
+       J_ASSERT(bh != NULL);
+       journal->j_sb_buffer = bh;
+       journal->j_superblock = (journal_superblock_t *)bh->b_data;
+out:
+       return journal;
+}
+
+/**
+ *  journal_t * jbd2_journal_init_inode() - creates a journal which maps to an inode.
+ *  @inode: An inode to create the journal in
+ *
+ * jbd2_journal_init_inode creates a journal which maps an on-disk inode as
+ * the journal.  The inode must exist already, must support bmap() and
+ * must have all data blocks preallocated.
+ */
+journal_t * jbd2_journal_init_inode (struct inode *inode)
+{
+       struct buffer_head *bh;
+       journal_t *journal = journal_init_common();
+       int err;
+       int n;
+       unsigned long long blocknr;
+
+       if (!journal)
+               return NULL;
+
+       journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
+       journal->j_inode = inode;
+       jbd_debug(1,
+                 "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
+                 journal, inode->i_sb->s_id, inode->i_ino,
+                 (long long) inode->i_size,
+                 inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
+
+       journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
+       journal->j_blocksize = inode->i_sb->s_blocksize;
+
+       /* journal descriptor can store up to n blocks -bzzz */
+       n = journal->j_blocksize / sizeof(journal_block_tag_t);
+       journal->j_wbufsize = n;
+       journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
+       if (!journal->j_wbuf) {
+               printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
+                       __FUNCTION__);
+               kfree(journal);
+               return NULL;
+       }
+
+       err = jbd2_journal_bmap(journal, 0, &blocknr);
+       /* If that failed, give up */
+       if (err) {
+               printk(KERN_ERR "%s: Cannot locate journal superblock\n",
+                      __FUNCTION__);
+               kfree(journal);
+               return NULL;
+       }
+
+       bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+       J_ASSERT(bh != NULL);
+       journal->j_sb_buffer = bh;
+       journal->j_superblock = (journal_superblock_t *)bh->b_data;
+
+       return journal;
+}
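+
+/*
+ * Mount-time sketch (illustrative; the surrounding filesystem code is
+ * hypothetical): create the journal from a preallocated inode, then
+ * replay and reset it before use.
+ */
+#if 0
+       journal_t *journal = jbd2_journal_init_inode(journal_inode);
+
+       if (!journal)
+               return -EINVAL;
+       if (jbd2_journal_load(journal)) {
+               jbd2_journal_destroy(journal);
+               return -EIO;
+       }
+#endif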
+
+/*
+ * If the journal init or create aborts, we need to mark the journal
+ * superblock as being NULL to prevent the journal destroy from writing
+ * back a bogus superblock.
+ */
+static void journal_fail_superblock (journal_t *journal)
+{
+       struct buffer_head *bh = journal->j_sb_buffer;
+       brelse(bh);
+       journal->j_sb_buffer = NULL;
+}
+
+/*
+ * Given a journal_t structure, initialise the various fields for
+ * startup of a new journaling session.  We use this both when creating
+ * a journal, and after recovering an old journal to reset it for
+ * subsequent use.
+ */
+
+static int journal_reset(journal_t *journal)
+{
+       journal_superblock_t *sb = journal->j_superblock;
+       unsigned long long first, last;
+
+       first = be32_to_cpu(sb->s_first);
+       last = be32_to_cpu(sb->s_maxlen);
+
+       journal->j_first = first;
+       journal->j_last = last;
+
+       journal->j_head = first;
+       journal->j_tail = first;
+       journal->j_free = last - first;
+
+       journal->j_tail_sequence = journal->j_transaction_sequence;
+       journal->j_commit_sequence = journal->j_transaction_sequence - 1;
+       journal->j_commit_request = journal->j_commit_sequence;
+
+       journal->j_max_transaction_buffers = journal->j_maxlen / 4;
+
+       /* Add the dynamic fields and write it to disk. */
+       jbd2_journal_update_superblock(journal, 1);
+       jbd2_journal_start_thread(journal);
+       return 0;
+}
+
+/**
+ * int jbd2_journal_create() - Initialise the new journal file
+ * @journal: Journal to create. This structure must have been initialised
+ *
+ * Given a journal_t structure which tells us which disk blocks we can
+ * use, create a new journal superblock and initialise all of the
+ * journal fields from scratch.
+ **/
+int jbd2_journal_create(journal_t *journal)
+{
+       unsigned long long blocknr;
+       struct buffer_head *bh;
+       journal_superblock_t *sb;
+       int i, err;
+
+       if (journal->j_maxlen < JBD2_MIN_JOURNAL_BLOCKS) {
+               printk (KERN_ERR "Journal length (%d blocks) too short.\n",
+                       journal->j_maxlen);
+               journal_fail_superblock(journal);
+               return -EINVAL;
+       }
+
+       if (journal->j_inode == NULL) {
+               /*
+                * We don't know what block to start at!
+                */
+               printk(KERN_EMERG
+                      "%s: creation of journal on external device!\n",
+                      __FUNCTION__);
+               BUG();
+       }
+
+       /* Zero out the entire journal on disk.  We cannot afford to
+          have any blocks on disk beginning with JBD2_MAGIC_NUMBER. */
+       jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
+       for (i = 0; i < journal->j_maxlen; i++) {
+               err = jbd2_journal_bmap(journal, i, &blocknr);
+               if (err)
+                       return err;
+               bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+               lock_buffer(bh);
+               memset (bh->b_data, 0, journal->j_blocksize);
+               BUFFER_TRACE(bh, "marking dirty");
+               mark_buffer_dirty(bh);
+               BUFFER_TRACE(bh, "marking uptodate");
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
+               __brelse(bh);
+       }
+
+       sync_blockdev(journal->j_dev);
+       jbd_debug(1, "JBD: journal cleared.\n");
+
+       /* OK, fill in the initial static fields in the new superblock */
+       sb = journal->j_superblock;
+
+       sb->s_header.h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
+       sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
+
+       sb->s_blocksize = cpu_to_be32(journal->j_blocksize);
+       sb->s_maxlen    = cpu_to_be32(journal->j_maxlen);
+       sb->s_first     = cpu_to_be32(1);
+
+       journal->j_transaction_sequence = 1;
+
+       journal->j_flags &= ~JBD2_ABORT;
+       journal->j_format_version = 2;
+
+       return journal_reset(journal);
+}
+
+/**
+ * void jbd2_journal_update_superblock() - Update journal sb on disk.
+ * @journal: The journal to update.
+ * @wait: Set to '0' if you don't want to wait for IO completion.
+ *
+ * Update a journal's dynamic superblock fields and write it to disk,
+ * optionally waiting for the IO to complete.
+ */
+void jbd2_journal_update_superblock(journal_t *journal, int wait)
+{
+       journal_superblock_t *sb = journal->j_superblock;
+       struct buffer_head *bh = journal->j_sb_buffer;
+
+       /*
+        * As a special case, if the on-disk copy is already marked as needing
+        * no recovery (s_start == 0) and there are no outstanding transactions
+        * in the filesystem, then we can safely defer the superblock update
+        * until the next commit by setting JBD2_FLUSHED.  This avoids
+        * attempting a write to a potentially-readonly device.
+        */
+       if (sb->s_start == 0 && journal->j_tail_sequence ==
+                               journal->j_transaction_sequence) {
+               jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
+                       "(start %ld, seq %d, errno %d)\n",
+                       journal->j_tail, journal->j_tail_sequence,
+                       journal->j_errno);
+               goto out;
+       }
+
+       spin_lock(&journal->j_state_lock);
+       jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n",
+                 journal->j_tail, journal->j_tail_sequence, journal->j_errno);
+
+       sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
+       sb->s_start    = cpu_to_be32(journal->j_tail);
+       sb->s_errno    = cpu_to_be32(journal->j_errno);
+       spin_unlock(&journal->j_state_lock);
+
+       BUFFER_TRACE(bh, "marking dirty");
+       mark_buffer_dirty(bh);
+       if (wait)
+               sync_dirty_buffer(bh);
+       else
+               ll_rw_block(SWRITE, 1, &bh);
+
+out:
+       /* If we have just flushed the log (by marking s_start==0), then
+        * any future commit will have to be careful to update the
+        * superblock again to re-record the true start of the log. */
+
+       spin_lock(&journal->j_state_lock);
+       if (sb->s_start)
+               journal->j_flags &= ~JBD2_FLUSHED;
+       else
+               journal->j_flags |= JBD2_FLUSHED;
+       spin_unlock(&journal->j_state_lock);
+}
+
+/*
+ * Read the superblock for a given journal, performing initial
+ * validation of the format.
+ */
+
+static int journal_get_superblock(journal_t *journal)
+{
+       struct buffer_head *bh;
+       journal_superblock_t *sb;
+       int err = -EIO;
+
+       bh = journal->j_sb_buffer;
+
+       J_ASSERT(bh != NULL);
+       if (!buffer_uptodate(bh)) {
+               ll_rw_block(READ, 1, &bh);
+               wait_on_buffer(bh);
+               if (!buffer_uptodate(bh)) {
+                       printk (KERN_ERR
+                               "JBD: IO error reading journal superblock\n");
+                       goto out;
+               }
+       }
+
+       sb = journal->j_superblock;
+
+       err = -EINVAL;
+
+       if (sb->s_header.h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER) ||
+           sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
+               printk(KERN_WARNING "JBD: no valid journal superblock found\n");
+               goto out;
+       }
+
+       switch(be32_to_cpu(sb->s_header.h_blocktype)) {
+       case JBD2_SUPERBLOCK_V1:
+               journal->j_format_version = 1;
+               break;
+       case JBD2_SUPERBLOCK_V2:
+               journal->j_format_version = 2;
+               break;
+       default:
+               printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
+               goto out;
+       }
+
+       if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
+               journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
+       else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
+               printk (KERN_WARNING "JBD: journal file too short\n");
+               goto out;
+       }
+
+       return 0;
+
+out:
+       journal_fail_superblock(journal);
+       return err;
+}
+
+/*
+ * Load the on-disk journal superblock and read the key fields into the
+ * journal_t.
+ */
+
+static int load_superblock(journal_t *journal)
+{
+       int err;
+       journal_superblock_t *sb;
+
+       err = journal_get_superblock(journal);
+       if (err)
+               return err;
+
+       sb = journal->j_superblock;
+
+       journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
+       journal->j_tail = be32_to_cpu(sb->s_start);
+       journal->j_first = be32_to_cpu(sb->s_first);
+       journal->j_last = be32_to_cpu(sb->s_maxlen);
+       journal->j_errno = be32_to_cpu(sb->s_errno);
+
+       return 0;
+}
+
+
+/**
+ * int jbd2_journal_load() - Read journal from disk.
+ * @journal: Journal to act on.
+ *
+ * Given a journal_t structure which tells us which disk blocks contain
+ * a journal, read the journal from disk to initialise the in-memory
+ * structures.
+ */
+int jbd2_journal_load(journal_t *journal)
+{
+       int err;
+       journal_superblock_t *sb;
+
+       err = load_superblock(journal);
+       if (err)
+               return err;
+
+       sb = journal->j_superblock;
+       /* If this is a V2 superblock, then we have to check the
+        * features flags on it. */
+
+       if (journal->j_format_version >= 2) {
+               if ((sb->s_feature_ro_compat &
+                    ~cpu_to_be32(JBD2_KNOWN_ROCOMPAT_FEATURES)) ||
+                   (sb->s_feature_incompat &
+                    ~cpu_to_be32(JBD2_KNOWN_INCOMPAT_FEATURES))) {
+                       printk (KERN_WARNING
+                               "JBD: Unrecognised features on journal\n");
+                       return -EINVAL;
+               }
+       }
+
+       /*
+        * Create a slab for this blocksize
+        */
+       err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
+       if (err)
+               return err;
+
+       /* Let the recovery code check whether it needs to recover any
+        * data from the journal. */
+       if (jbd2_journal_recover(journal))
+               goto recovery_error;
+
+       /* OK, we've finished with the dynamic journal bits:
+        * reinitialise the dynamic contents of the superblock in memory
+        * and reset them on disk. */
+       if (journal_reset(journal))
+               goto recovery_error;
+
+       journal->j_flags &= ~JBD2_ABORT;
+       journal->j_flags |= JBD2_LOADED;
+       return 0;
+
+recovery_error:
+       printk (KERN_WARNING "JBD: recovery failed\n");
+       return -EIO;
+}
+
+/**
+ * void jbd2_journal_destroy() - Release a journal_t structure.
+ * @journal: Journal to act on.
+ *
+ * Release a journal_t structure once it is no longer in use by the
+ * journaled object.
+ */
+void jbd2_journal_destroy(journal_t *journal)
+{
+       /* Wait for the commit thread to wake up and die. */
+       journal_kill_thread(journal);
+
+       /* Force a final log commit */
+       if (journal->j_running_transaction)
+               jbd2_journal_commit_transaction(journal);
+
+       /* Force any old transactions to disk */
+
+       /* Totally anal locking here... */
+       spin_lock(&journal->j_list_lock);
+       while (journal->j_checkpoint_transactions != NULL) {
+               spin_unlock(&journal->j_list_lock);
+               jbd2_log_do_checkpoint(journal);
+               spin_lock(&journal->j_list_lock);
+       }
+
+       J_ASSERT(journal->j_running_transaction == NULL);
+       J_ASSERT(journal->j_committing_transaction == NULL);
+       J_ASSERT(journal->j_checkpoint_transactions == NULL);
+       spin_unlock(&journal->j_list_lock);
+
+       /* We can now mark the journal as empty. */
+       journal->j_tail = 0;
+       journal->j_tail_sequence = ++journal->j_transaction_sequence;
+       if (journal->j_sb_buffer) {
+               jbd2_journal_update_superblock(journal, 1);
+               brelse(journal->j_sb_buffer);
+       }
+
+       if (journal->j_inode)
+               iput(journal->j_inode);
+       if (journal->j_revoke)
+               jbd2_journal_destroy_revoke(journal);
+       kfree(journal->j_wbuf);
+       kfree(journal);
+}
+
+
+/**
+ * int jbd2_journal_check_used_features() - Check if the specified features are used.
+ * @journal: Journal to check.
+ * @compat: bitmask of compatible features
+ * @ro: bitmask of features that force read-only mount
+ * @incompat: bitmask of incompatible features
+ *
+ * Check whether the journal uses all of a given set of
+ * features.  Return true (non-zero) if it does.
+ **/
+
+int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat,
+                                unsigned long ro, unsigned long incompat)
+{
+       journal_superblock_t *sb;
+
+       if (!compat && !ro && !incompat)
+               return 1;
+       if (journal->j_format_version == 1)
+               return 0;
+
+       sb = journal->j_superblock;
+
+       if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) &&
+           ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&
+           ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat))
+               return 1;
+
+       return 0;
+}
+
+/**
+ * int jbd2_journal_check_available_features() - Check feature set in journalling layer
+ * @journal: Journal to check.
+ * @compat: bitmask of compatible features
+ * @ro: bitmask of features that force read-only mount
+ * @incompat: bitmask of incompatible features
+ *
+ * Check whether the journaling code supports the use of
+ * all of a given set of features on this journal.  Return true
+ * (non-zero) if it can. */
+
+int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat,
+                                     unsigned long ro, unsigned long incompat)
+{
+       journal_superblock_t *sb;
+
+       if (!compat && !ro && !incompat)
+               return 1;
+
+       sb = journal->j_superblock;
+
+       /* We can support any known requested features iff the
+        * superblock is in version 2.  Otherwise we fail to support any
+        * extended sb features. */
+
+       if (journal->j_format_version != 2)
+               return 0;
+
+       if ((compat   & JBD2_KNOWN_COMPAT_FEATURES) == compat &&
+           (ro       & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro &&
+           (incompat & JBD2_KNOWN_INCOMPAT_FEATURES) == incompat)
+               return 1;
+
+       return 0;
+}
+
+/**
+ * int jbd2_journal_set_features () - Mark a given journal feature in the superblock
+ * @journal: Journal to act on.
+ * @compat: bitmask of compatible features
+ * @ro: bitmask of features that force read-only mount
+ * @incompat: bitmask of incompatible features
+ *
+ * Mark a given journal feature as present on the
+ * superblock.  Returns true if the requested features could be set.
+ *
+ */
+
+int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
+                         unsigned long ro, unsigned long incompat)
+{
+       journal_superblock_t *sb;
+
+       if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
+               return 1;
+
+       if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
+               return 0;
+
+       jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
+                 compat, ro, incompat);
+
+       sb = journal->j_superblock;
+
+       sb->s_feature_compat    |= cpu_to_be32(compat);
+       sb->s_feature_ro_compat |= cpu_to_be32(ro);
+       sb->s_feature_incompat  |= cpu_to_be32(incompat);
+
+       return 1;
+}
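+
+/*
+ * Usage sketch (illustrative): a filesystem wanting 64-bit block
+ * numbers in the journal would try to set the incompat feature so.
+ */
+#if 0
+       if (!jbd2_journal_set_features(journal, 0, 0,
+                                      JBD2_FEATURE_INCOMPAT_64BIT))
+               return -EINVAL;
+#endif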
+
+
+/**
+ * int jbd2_journal_update_format () - Update on-disk journal structure.
+ * @journal: Journal to act on.
+ *
+ * Given an initialised but unloaded journal struct, poke about in the
+ * on-disk structure to update it to the most recent supported version.
+ */
+int jbd2_journal_update_format (journal_t *journal)
+{
+       journal_superblock_t *sb;
+       int err;
+
+       err = journal_get_superblock(journal);
+       if (err)
+               return err;
+
+       sb = journal->j_superblock;
+
+       switch (be32_to_cpu(sb->s_header.h_blocktype)) {
+       case JBD2_SUPERBLOCK_V2:
+               return 0;
+       case JBD2_SUPERBLOCK_V1:
+               return journal_convert_superblock_v1(journal, sb);
+       default:
+               break;
+       }
+       return -EINVAL;
+}
+
+static int journal_convert_superblock_v1(journal_t *journal,
+                                        journal_superblock_t *sb)
+{
+       int offset, blocksize;
+       struct buffer_head *bh;
+
+       printk(KERN_WARNING
+               "JBD: Converting superblock from version 1 to 2.\n");
+
+       /* Pre-initialise new fields to zero */
+       offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb);
+       blocksize = be32_to_cpu(sb->s_blocksize);
+       memset(&sb->s_feature_compat, 0, blocksize-offset);
+
+       sb->s_nr_users = cpu_to_be32(1);
+       sb->s_header.h_blocktype = cpu_to_be32(JBD2_SUPERBLOCK_V2);
+       journal->j_format_version = 2;
+
+       bh = journal->j_sb_buffer;
+       BUFFER_TRACE(bh, "marking dirty");
+       mark_buffer_dirty(bh);
+       sync_dirty_buffer(bh);
+       return 0;
+}
+
+
+/**
+ * int jbd2_journal_flush () - Flush journal
+ * @journal: Journal to act on.
+ *
+ * Flush all data for a given journal to disk and empty the journal.
+ * Filesystems can use this when remounting readonly to ensure that
+ * recovery does not need to happen on remount.
+ */
+
+int jbd2_journal_flush(journal_t *journal)
+{
+       int err = 0;
+       transaction_t *transaction = NULL;
+       unsigned long old_tail;
+
+       spin_lock(&journal->j_state_lock);
+
+       /* Force everything buffered to the log... */
+       if (journal->j_running_transaction) {
+               transaction = journal->j_running_transaction;
+               __jbd2_log_start_commit(journal, transaction->t_tid);
+       } else if (journal->j_committing_transaction)
+               transaction = journal->j_committing_transaction;
+
+       /* Wait for the log commit to complete... */
+       if (transaction) {
+               tid_t tid = transaction->t_tid;
+
+               spin_unlock(&journal->j_state_lock);
+               jbd2_log_wait_commit(journal, tid);
+       } else {
+               spin_unlock(&journal->j_state_lock);
+       }
+
+       /* ...and flush everything in the log out to disk. */
+       spin_lock(&journal->j_list_lock);
+       while (!err && journal->j_checkpoint_transactions != NULL) {
+               spin_unlock(&journal->j_list_lock);
+               err = jbd2_log_do_checkpoint(journal);
+               spin_lock(&journal->j_list_lock);
+       }
+       spin_unlock(&journal->j_list_lock);
+       jbd2_cleanup_journal_tail(journal);
+
+       /* Finally, mark the journal as really needing no recovery.
+        * This sets s_start==0 in the underlying superblock, which is
+        * the magic code for a fully-recovered superblock.  Any future
+        * commits of data to the journal will restore the current
+        * s_start value. */
+       spin_lock(&journal->j_state_lock);
+       old_tail = journal->j_tail;
+       journal->j_tail = 0;
+       spin_unlock(&journal->j_state_lock);
+       jbd2_journal_update_superblock(journal, 1);
+       spin_lock(&journal->j_state_lock);
+       journal->j_tail = old_tail;
+
+       J_ASSERT(!journal->j_running_transaction);
+       J_ASSERT(!journal->j_committing_transaction);
+       J_ASSERT(!journal->j_checkpoint_transactions);
+       J_ASSERT(journal->j_head == journal->j_tail);
+       J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
+       spin_unlock(&journal->j_state_lock);
+       return err;
+}
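+
+/*
+ * Usage sketch (illustrative; modelled on the remount-readonly case
+ * described above): quiesce updates around the flush.
+ */
+#if 0
+       jbd2_journal_lock_updates(journal);
+       err = jbd2_journal_flush(journal);
+       jbd2_journal_unlock_updates(journal);
+#endif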
+
+/**
+ * int jbd2_journal_wipe() - Wipe journal contents
+ * @journal: Journal to act on.
+ * @write: flag (see below)
+ *
+ * Wipe out all of the contents of a journal, safely.  This will produce
+ * a warning if the journal contains any valid recovery information.
+ * Must be called between journal_init_*() and jbd2_journal_load().
+ *
+ * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
+ * we merely suppress recovery.
+ */
+
+int jbd2_journal_wipe(journal_t *journal, int write)
+{
+       journal_superblock_t *sb;
+       int err = 0;
+
+       J_ASSERT (!(journal->j_flags & JBD2_LOADED));
+
+       err = load_superblock(journal);
+       if (err)
+               return err;
+
+       sb = journal->j_superblock;
+
+       if (!journal->j_tail)
+               goto no_recovery;
+
+       printk (KERN_WARNING "JBD: %s recovery information on journal\n",
+               write ? "Clearing" : "Ignoring");
+
+       err = jbd2_journal_skip_recovery(journal);
+       if (write)
+               jbd2_journal_update_superblock(journal, 1);
+
+ no_recovery:
+       return err;
+}
+
+/*
+ * journal_dev_name: format a character string describing the device on
+ * which this journal resides.
+ */
+
+static const char *journal_dev_name(journal_t *journal, char *buffer)
+{
+       struct block_device *bdev;
+
+       if (journal->j_inode)
+               bdev = journal->j_inode->i_sb->s_bdev;
+       else
+               bdev = journal->j_dev;
+
+       return bdevname(bdev, buffer);
+}
+
+/*
+ * Journal abort has very specific semantics; they are described in
+ * detail in the documentation for jbd2_journal_abort() below.
+ *
+ * Two internal functions, which provide abort to the jbd layer
+ * itself, are here.
+ */
+
+/*
+ * Quick version for internal journal use (doesn't lock the journal).
+ * Aborts hard --- we mark the journal as aborted, but do _nothing_ else,
+ * and don't attempt to make any other journal updates.
+ */
+void __jbd2_journal_abort_hard(journal_t *journal)
+{
+       transaction_t *transaction;
+       char b[BDEVNAME_SIZE];
+
+       if (journal->j_flags & JBD2_ABORT)
+               return;
+
+       printk(KERN_ERR "Aborting journal on device %s.\n",
+               journal_dev_name(journal, b));
+
+       spin_lock(&journal->j_state_lock);
+       journal->j_flags |= JBD2_ABORT;
+       transaction = journal->j_running_transaction;
+       if (transaction)
+               __jbd2_log_start_commit(journal, transaction->t_tid);
+       spin_unlock(&journal->j_state_lock);
+}
+
+/* Soft abort: record the abort error status in the journal superblock,
+ * but don't do any other IO. */
+static void __journal_abort_soft (journal_t *journal, int errno)
+{
+       if (journal->j_flags & JBD2_ABORT)
+               return;
+
+       if (!journal->j_errno)
+               journal->j_errno = errno;
+
+       __jbd2_journal_abort_hard(journal);
+
+       if (errno)
+               jbd2_journal_update_superblock(journal, 1);
+}
+
+/**
+ * void jbd2_journal_abort () - Shutdown the journal immediately.
+ * @journal: the journal to shutdown.
+ * @errno:   an error number to record in the journal indicating
+ *           the reason for the shutdown.
+ *
+ * Perform a complete, immediate shutdown of the ENTIRE
+ * journal (not of a single transaction).  This operation cannot be
+ * undone without closing and reopening the journal.
+ *
+ * The jbd2_journal_abort function is intended to support higher level error
+ * recovery mechanisms such as the ext2/ext3 remount-readonly error
+ * mode.
+ *
+ * Journal abort has very specific semantics.  Any existing dirty,
+ * unjournaled buffers in the main filesystem will still be written to
+ * disk by bdflush, but the journaling mechanism will be suspended
+ * immediately and no further transaction commits will be honoured.
+ *
+ * Any dirty, journaled buffers will be written back to disk without
+ * hitting the journal.  Atomicity cannot be guaranteed on an aborted
+ * filesystem, but we _do_ attempt to leave as much data as possible
+ * behind for fsck to use for cleanup.
+ *
+ * Any attempt to get a new transaction handle on a journal which is in
+ * ABORT state will just result in an -EROFS error return.  A
+ * jbd2_journal_stop on an existing handle will return -EIO if we have
+ * entered abort state during the update.
+ *
+ * Recursive transactions are not disturbed by journal abort until the
+ * final jbd2_journal_stop, which will receive the -EIO error.
+ *
+ * Finally, the jbd2_journal_abort call allows the caller to supply an errno
+ * which will be recorded (if possible) in the journal superblock.  This
+ * allows a client to record failure conditions in the middle of a
+ * transaction without having to complete the transaction to record the
+ * failure to disk.  ext3_error, for example, now uses this
+ * functionality.
+ *
+ * Errors which originate from within the journaling layer will NOT
+ * supply an errno; a null errno implies that absolutely no further
+ * writes are done to the journal (unless there are any already in
+ * progress).
+ *
+ */
+
+void jbd2_journal_abort(journal_t *journal, int errno)
+{
+       __journal_abort_soft(journal, errno);
+}
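+
+/*
+ * Caller sketch (illustrative; the surrounding error-handling code is
+ * hypothetical): record an I/O failure and fall back to read-only, as
+ * in the remount-readonly recovery described above.
+ */
+#if 0
+       if (io_failed) {
+               jbd2_journal_abort(journal, -EIO);
+               sb->s_flags |= MS_RDONLY;
+       }
+#endif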
+
+/**
+ * int jbd2_journal_errno () - returns the journal's error state.
+ * @journal: journal to examine.
+ *
+ * This is the errno number set with jbd2_journal_abort(), the last
+ * time the journal was mounted - if the journal was stopped
+ * without calling abort this will be 0.
+ *
+ * If the journal has been aborted during this mount, -EROFS will
+ * be returned.
+ */
+int jbd2_journal_errno(journal_t *journal)
+{
+       int err;
+
+       spin_lock(&journal->j_state_lock);
+       if (journal->j_flags & JBD2_ABORT)
+               err = -EROFS;
+       else
+               err = journal->j_errno;
+       spin_unlock(&journal->j_state_lock);
+       return err;
+}
+
+/**
+ * int jbd2_journal_clear_err () - clears the journal's error state
+ * @journal: journal to act on.
+ *
+ * An error must be cleared or Acked to take a FS out of readonly
+ * mode.
+ */
+int jbd2_journal_clear_err(journal_t *journal)
+{
+       int err = 0;
+
+       spin_lock(&journal->j_state_lock);
+       if (journal->j_flags & JBD2_ABORT)
+               err = -EROFS;
+       else
+               journal->j_errno = 0;
+       spin_unlock(&journal->j_state_lock);
+       return err;
+}
+
+/**
+ * void jbd2_journal_ack_err() - Ack journal err.
+ * @journal: journal to act on.
+ *
+ * An error must be cleared or Acked to take a FS out of readonly
+ * mode.
+ */
+void jbd2_journal_ack_err(journal_t *journal)
+{
+       spin_lock(&journal->j_state_lock);
+       if (journal->j_errno)
+               journal->j_flags |= JBD2_ACK_ERR;
+       spin_unlock(&journal->j_state_lock);
+}
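+
+/*
+ * Usage sketch (illustrative): how a filesystem might acknowledge and
+ * clear a recorded journal error when leaving read-only mode.
+ */
+#if 0
+       if (jbd2_journal_errno(journal)) {
+               jbd2_journal_ack_err(journal);
+               jbd2_journal_clear_err(journal);
+       }
+#endif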
+
+int jbd2_journal_blocks_per_page(struct inode *inode)
+{
+       return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
+}
+
+/*
+ * helper functions to deal with 32 or 64bit block numbers.
+ */
+size_t journal_tag_bytes(journal_t *journal)
+{
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+               return JBD_TAG_SIZE64;
+       else
+               return JBD_TAG_SIZE32;
+}
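+
+/*
+ * Worked example (illustrative, assuming 8-byte 32-bit tags and 12-byte
+ * 64-bit tags): ignoring the descriptor header, a 4096-byte descriptor
+ * block holds 4096 / 8 = 512 tags in 32-bit mode but only
+ * 4096 / 12 = 341 tags with JBD2_FEATURE_INCOMPAT_64BIT set.
+ */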
+
+/*
+ * Simple support for retrying memory allocations.  Introduced to help to
+ * debug different VM deadlock avoidance strategies.
+ */
+void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
+{
+       return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
+}
+
+/*
+ * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
+ * and allocate frozen and commit buffers from these slabs.
+ *
+ * The reason for doing this is to avoid SLAB_DEBUG, since it could
+ * cause a bh to cross a page boundary.
+ */
+
+#define JBD_MAX_SLABS 5
+#define JBD_SLAB_INDEX(size)  (size >> 11)
+
+static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
+static const char *jbd_slab_names[JBD_MAX_SLABS] = {
+       "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
+};
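+
+/*
+ * Worked example: JBD_SLAB_INDEX(1024) == 0, JBD_SLAB_INDEX(2048) == 1,
+ * JBD_SLAB_INDEX(4096) == 2 and JBD_SLAB_INDEX(8192) == 4, which is why
+ * slot 3 of jbd_slab_names[] is unused.
+ */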
+
+static void jbd2_journal_destroy_jbd_slabs(void)
+{
+       int i;
+
+       for (i = 0; i < JBD_MAX_SLABS; i++) {
+               if (jbd_slab[i])
+                       kmem_cache_destroy(jbd_slab[i]);
+               jbd_slab[i] = NULL;
+       }
+}
+
+static int jbd2_journal_create_jbd_slab(size_t slab_size)
+{
+       int i = JBD_SLAB_INDEX(slab_size);
+
+       BUG_ON(i >= JBD_MAX_SLABS);
+
+       /*
+        * Check if we already have a slab created for this size
+        */
+       if (jbd_slab[i])
+               return 0;
+
+       /*
+        * Create a slab and force alignment to be the same as the slab size -
+        * this will make sure that allocations won't cross the page
+        * boundary.
+        */
+       jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
+                               slab_size, slab_size, 0, NULL, NULL);
+       if (!jbd_slab[i]) {
+               printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void * jbd2_slab_alloc(size_t size, gfp_t flags)
+{
+       int idx;
+
+       idx = JBD_SLAB_INDEX(size);
+       BUG_ON(jbd_slab[idx] == NULL);
+       return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
+}
+
+void jbd2_slab_free(void *ptr,  size_t size)
+{
+       int idx;
+
+       idx = JBD_SLAB_INDEX(size);
+       BUG_ON(jbd_slab[idx] == NULL);
+       kmem_cache_free(jbd_slab[idx], ptr);
+}
+
+/*
+ * Journal_head storage management
+ */
+static kmem_cache_t *jbd2_journal_head_cache;
+#ifdef CONFIG_JBD_DEBUG
+static atomic_t nr_journal_heads = ATOMIC_INIT(0);
+#endif
+
+static int journal_init_jbd2_journal_head_cache(void)
+{
+       int retval;
+
+       J_ASSERT(jbd2_journal_head_cache == 0);
+       jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
+                               sizeof(struct journal_head),
+                               0,              /* offset */
+                               0,              /* flags */
+                               NULL,           /* ctor */
+                               NULL);          /* dtor */
+       retval = 0;
+       if (jbd2_journal_head_cache == 0) {
+               retval = -ENOMEM;
+               printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
+       }
+       return retval;
+}
+
+static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
+{
+       J_ASSERT(jbd2_journal_head_cache != NULL);
+       kmem_cache_destroy(jbd2_journal_head_cache);
+       jbd2_journal_head_cache = NULL;
+}
+
+/*
+ * journal_head splicing and dicing
+ */
+static struct journal_head *journal_alloc_journal_head(void)
+{
+       struct journal_head *ret;
+       static unsigned long last_warning;
+
+#ifdef CONFIG_JBD_DEBUG
+       atomic_inc(&nr_journal_heads);
+#endif
+       ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
+       if (ret == 0) {
+               jbd_debug(1, "out of memory for journal_head\n");
+               if (time_after(jiffies, last_warning + 5*HZ)) {
+                       printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
+                              __FUNCTION__);
+                       last_warning = jiffies;
+               }
+               while (ret == 0) {
+                       yield();
+                       ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
+               }
+       }
+       return ret;
+}
+
+static void journal_free_journal_head(struct journal_head *jh)
+{
+#ifdef CONFIG_JBD_DEBUG
+       atomic_dec(&nr_journal_heads);
+       memset(jh, JBD_POISON_FREE, sizeof(*jh));
+#endif
+       kmem_cache_free(jbd2_journal_head_cache, jh);
+}
+
+/*
+ * A journal_head is attached to a buffer_head whenever JBD has an
+ * interest in the buffer.
+ *
+ * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
+ * is set.  This bit is tested in core kernel code where we need to take
+ * JBD-specific actions.  Testing the zeroness of ->b_private is not reliable
+ * there.
+ *
+ * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
+ *
+ * When a buffer has its BH_JBD bit set it is immune from being released by
+ * core kernel code, mainly via ->b_count.
+ *
+ * A journal_head may be detached from its buffer_head when the journal_head's
+ * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
+ * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the
+ * journal_head can be dropped if needed.
+ *
+ * Various places in the kernel want to attach a journal_head to a buffer_head
+ * _before_ attaching the journal_head to a transaction.  To protect the
+ * journal_head in this situation, jbd2_journal_add_journal_head elevates the
+ * journal_head's b_jcount refcount by one.  The caller must call
+ * jbd2_journal_put_journal_head() to undo this.
+ *
+ * So the typical usage would be:
+ *
+ *     (Attach a journal_head if needed.  Increments b_jcount)
+ *     struct journal_head *jh = jbd2_journal_add_journal_head(bh);
+ *     ...
+ *     jh->b_transaction = xxx;
+ *     jbd2_journal_put_journal_head(jh);
+ *
+ * Now, the journal_head's b_jcount is zero, but it is safe from being released
+ * because it has a non-zero b_transaction.
+ */
+
+/*
+ * Give a buffer_head a journal_head.
+ *
+ * Doesn't need the journal lock.
+ * May sleep.
+ */
+struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh)
+{
+       struct journal_head *jh;
+       struct journal_head *new_jh = NULL;
+
+repeat:
+       if (!buffer_jbd(bh)) {
+               new_jh = journal_alloc_journal_head();
+               memset(new_jh, 0, sizeof(*new_jh));
+       }
+
+       jbd_lock_bh_journal_head(bh);
+       if (buffer_jbd(bh)) {
+               jh = bh2jh(bh);
+       } else {
+               J_ASSERT_BH(bh,
+                       (atomic_read(&bh->b_count) > 0) ||
+                       (bh->b_page && bh->b_page->mapping));
+
+               if (!new_jh) {
+                       jbd_unlock_bh_journal_head(bh);
+                       goto repeat;
+               }
+
+               jh = new_jh;
+               new_jh = NULL;          /* We consumed it */
+               set_buffer_jbd(bh);
+               bh->b_private = jh;
+               jh->b_bh = bh;
+               get_bh(bh);
+               BUFFER_TRACE(bh, "added journal_head");
+       }
+       jh->b_jcount++;
+       jbd_unlock_bh_journal_head(bh);
+       if (new_jh)
+               journal_free_journal_head(new_jh);
+       return bh->b_private;
+}
+
+/*
+ * Grab a ref against this buffer_head's journal_head.  If it ended up not
+ * having a journal_head, return NULL
+ */
+struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
+{
+       struct journal_head *jh = NULL;
+
+       jbd_lock_bh_journal_head(bh);
+       if (buffer_jbd(bh)) {
+               jh = bh2jh(bh);
+               jh->b_jcount++;
+       }
+       jbd_unlock_bh_journal_head(bh);
+       return jh;
+}
+
+static void __journal_remove_journal_head(struct buffer_head *bh)
+{
+       struct journal_head *jh = bh2jh(bh);
+
+       J_ASSERT_JH(jh, jh->b_jcount >= 0);
+
+       get_bh(bh);
+       if (jh->b_jcount == 0) {
+               if (jh->b_transaction == NULL &&
+                               jh->b_next_transaction == NULL &&
+                               jh->b_cp_transaction == NULL) {
+                       J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
+                       J_ASSERT_BH(bh, buffer_jbd(bh));
+                       J_ASSERT_BH(bh, jh2bh(jh) == bh);
+                       BUFFER_TRACE(bh, "remove journal_head");
+                       if (jh->b_frozen_data) {
+                               printk(KERN_WARNING "%s: freeing "
+                                               "b_frozen_data\n",
+                                               __FUNCTION__);
+                               jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+                       }
+                       if (jh->b_committed_data) {
+                               printk(KERN_WARNING "%s: freeing "
+                                               "b_committed_data\n",
+                                               __FUNCTION__);
+                               jbd2_slab_free(jh->b_committed_data, bh->b_size);
+                       }
+                       bh->b_private = NULL;
+                       jh->b_bh = NULL;        /* debug, really */
+                       clear_buffer_jbd(bh);
+                       __brelse(bh);
+                       journal_free_journal_head(jh);
+               } else {
+                       BUFFER_TRACE(bh, "journal_head was locked");
+               }
+       }
+}
+
+/*
+ * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction
+ * and has a zero b_jcount then remove and release its journal_head.   If we did
+ * see that the buffer is not used by any transaction we also "logically"
+ * decrement ->b_count.
+ *
+ * We in fact take an additional increment on ->b_count as a convenience,
+ * because the caller usually wants to do additional things with the bh
+ * after calling here.
+ * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some
+ * time.  Once the caller has run __brelse(), the buffer is eligible for
+ * reaping by try_to_free_buffers().
+ */
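+/*
+ * Minimal caller sketch of the contract above (illustrative only):
+ *
+ *     jbd2_journal_remove_journal_head(bh);
+ *     ...finish touching the buffer...
+ *     __brelse(bh);           (the mandatory matching release)
+ */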
+void jbd2_journal_remove_journal_head(struct buffer_head *bh)
+{
+       jbd_lock_bh_journal_head(bh);
+       __journal_remove_journal_head(bh);
+       jbd_unlock_bh_journal_head(bh);
+}
+
+/*
+ * Drop a reference on the passed journal_head.  If it fell to zero then try to
+ * release the journal_head from the buffer_head.
+ */
+void jbd2_journal_put_journal_head(struct journal_head *jh)
+{
+       struct buffer_head *bh = jh2bh(jh);
+
+       jbd_lock_bh_journal_head(bh);
+       J_ASSERT_JH(jh, jh->b_jcount > 0);
+       --jh->b_jcount;
+       if (!jh->b_jcount && !jh->b_transaction) {
+               __journal_remove_journal_head(bh);
+               __brelse(bh);
+       }
+       jbd_unlock_bh_journal_head(bh);
+}
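+
+/*
+ * Typical grab/put pairing (illustrative):
+ *
+ *     struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
+ *     if (jh) {
+ *             ...b_jcount is elevated, so jh cannot be freed here...
+ *             jbd2_journal_put_journal_head(jh);
+ *     }
+ */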
+
+/*
+ * /proc tunables
+ */
+#if defined(CONFIG_JBD_DEBUG)
+int jbd2_journal_enable_debug;
+EXPORT_SYMBOL(jbd2_journal_enable_debug);
+#endif
+
+#if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
+
+static struct proc_dir_entry *proc_jbd_debug;
+
+static int read_jbd_debug(char *page, char **start, off_t off,
+                         int count, int *eof, void *data)
+{
+       int ret;
+
+       ret = sprintf(page + off, "%d\n", jbd2_journal_enable_debug);
+       *eof = 1;
+       return ret;
+}
+
+static int write_jbd_debug(struct file *file, const char __user *buffer,
+                          unsigned long count, void *data)
+{
+       char buf[32];
+
+       if (count > ARRAY_SIZE(buf) - 1)
+               count = ARRAY_SIZE(buf) - 1;
+       if (copy_from_user(buf, buffer, count))
+               return -EFAULT;
+       buf[count] = '\0';
+       jbd2_journal_enable_debug = simple_strtoul(buf, NULL, 10);
+       return count;
+}
+
+#define JBD_PROC_NAME "sys/fs/jbd2-debug"
+
+static void __init create_jbd_proc_entry(void)
+{
+       proc_jbd_debug = create_proc_entry(JBD_PROC_NAME, 0644, NULL);
+       if (proc_jbd_debug) {
+               /* Why is this so hard? */
+               proc_jbd_debug->read_proc = read_jbd_debug;
+               proc_jbd_debug->write_proc = write_jbd_debug;
+       }
+}
+
+static void __exit jbd2_remove_jbd_proc_entry(void)
+{
+       if (proc_jbd_debug)
+               remove_proc_entry(JBD_PROC_NAME, NULL);
+}
+
+#else
+
+#define create_jbd_proc_entry() do {} while (0)
+#define jbd2_remove_jbd_proc_entry() do {} while (0)
+
+#endif
+
+kmem_cache_t *jbd2_handle_cache;
+
+static int __init journal_init_handle_cache(void)
+{
+       jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
+                               sizeof(handle_t),
+                               0,              /* offset */
+                               0,              /* flags */
+                               NULL,           /* ctor */
+                               NULL);          /* dtor */
+       if (jbd2_handle_cache == NULL) {
+               printk(KERN_EMERG "JBD: failed to create handle cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void jbd2_journal_destroy_handle_cache(void)
+{
+       if (jbd2_handle_cache)
+               kmem_cache_destroy(jbd2_handle_cache);
+}
+
+/*
+ * Module startup and shutdown
+ */
+
+static int __init journal_init_caches(void)
+{
+       int ret;
+
+       ret = jbd2_journal_init_revoke_caches();
+       if (ret == 0)
+               ret = journal_init_jbd2_journal_head_cache();
+       if (ret == 0)
+               ret = journal_init_handle_cache();
+       return ret;
+}
+
+static void jbd2_journal_destroy_caches(void)
+{
+       jbd2_journal_destroy_revoke_caches();
+       jbd2_journal_destroy_jbd2_journal_head_cache();
+       jbd2_journal_destroy_handle_cache();
+       jbd2_journal_destroy_jbd_slabs();
+}
+
+static int __init journal_init(void)
+{
+       int ret;
+
+       BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
+
+       ret = journal_init_caches();
+       if (ret != 0)
+               jbd2_journal_destroy_caches();
+       create_jbd_proc_entry();
+       return ret;
+}
+
+static void __exit journal_exit(void)
+{
+#ifdef CONFIG_JBD_DEBUG
+       int n = atomic_read(&nr_journal_heads);
+       if (n)
+               printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
+#endif
+       jbd2_remove_jbd_proc_entry();
+       jbd2_journal_destroy_caches();
+}
+
+MODULE_LICENSE("GPL");
+module_init(journal_init);
+module_exit(journal_exit);
+
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
new file mode 100644 (file)
index 0000000..9f10aca
--- /dev/null
@@ -0,0 +1,609 @@
+/*
+ * linux/fs/recovery.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Journal recovery routines for the generic filesystem journaling code;
+ * part of the ext2fs journaling system.
+ */
+
+#ifndef __KERNEL__
+#include "jfs_user.h"
+#else
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#endif
+
+/*
+ * Maintain information about the progress of the recovery job, so that
+ * the different passes can carry information between them.
+ */
+struct recovery_info
+{
+       tid_t           start_transaction;
+       tid_t           end_transaction;
+
+       int             nr_replays;
+       int             nr_revokes;
+       int             nr_revoke_hits;
+};
+
+enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
+static int do_one_pass(journal_t *journal,
+                               struct recovery_info *info, enum passtype pass);
+static int scan_revoke_records(journal_t *, struct buffer_head *,
+                               tid_t, struct recovery_info *);
+
+#ifdef __KERNEL__
+
+/* Release readahead buffers after use */
+static void journal_brelse_array(struct buffer_head *b[], int n)
+{
+       while (--n >= 0)
+               brelse (b[n]);
+}
+
+
+/*
+ * When reading from the journal, we are going through the block device
+ * layer directly and so there is no readahead being done for us.  We
+ * need to implement any readahead ourselves if we want it to happen at
+ * all.  Recovery is basically one long sequential read, so make sure we
+ * do the IO in reasonably large chunks.
+ *
+ * This is not so critical that we need to be enormously clever about
+ * the readahead size, though.  128K is a purely arbitrary, good-enough
+ * fixed value.
+ */
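+
+/*
+ * Worked numbers (illustrative): with a 4096-byte journal block size the
+ * window is 128 * 1024 / 4096 = 32 blocks, submitted to the block layer
+ * in batches of MAXBUF (8) buffer_heads, i.e. at most four ll_rw_block()
+ * calls per readahead window.
+ */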
+
+#define MAXBUF 8
+static int do_readahead(journal_t *journal, unsigned int start)
+{
+       int err;
+       unsigned int max, nbufs, next;
+       unsigned long long blocknr;
+       struct buffer_head *bh;
+
+       struct buffer_head * bufs[MAXBUF];
+
+       /* Do up to 128K of readahead */
+       max = start + (128 * 1024 / journal->j_blocksize);
+       if (max > journal->j_maxlen)
+               max = journal->j_maxlen;
+
+       /* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
+        * a time to the block device IO layer. */
+
+       nbufs = 0;
+
+       for (next = start; next < max; next++) {
+               err = jbd2_journal_bmap(journal, next, &blocknr);
+
+               if (err) {
+                       printk (KERN_ERR "JBD: bad block at offset %u\n",
+                               next);
+                       goto failed;
+               }
+
+               bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+               if (!bh) {
+                       err = -ENOMEM;
+                       goto failed;
+               }
+
+               if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
+                       bufs[nbufs++] = bh;
+                       if (nbufs == MAXBUF) {
+                               ll_rw_block(READ, nbufs, bufs);
+                               journal_brelse_array(bufs, nbufs);
+                               nbufs = 0;
+                       }
+               } else
+                       brelse(bh);
+       }
+
+       if (nbufs)
+               ll_rw_block(READ, nbufs, bufs);
+       err = 0;
+
+failed:
+       if (nbufs)
+               journal_brelse_array(bufs, nbufs);
+       return err;
+}
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * Read a block from the journal
+ */
+
+static int jread(struct buffer_head **bhp, journal_t *journal,
+                unsigned int offset)
+{
+       int err;
+       unsigned long long blocknr;
+       struct buffer_head *bh;
+
+       *bhp = NULL;
+
+       if (offset >= journal->j_maxlen) {
+               printk(KERN_ERR "JBD: corrupted journal superblock\n");
+               return -EIO;
+       }
+
+       err = jbd2_journal_bmap(journal, offset, &blocknr);
+
+       if (err) {
+               printk (KERN_ERR "JBD: bad block at offset %u\n",
+                       offset);
+               return err;
+       }
+
+       bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+       if (!bh)
+               return -ENOMEM;
+
+       if (!buffer_uptodate(bh)) {
+               /* If this is a brand new buffer, start readahead.
+                   Otherwise, we assume we are already reading it.  */
+               if (!buffer_req(bh))
+                       do_readahead(journal, offset);
+               wait_on_buffer(bh);
+       }
+
+       if (!buffer_uptodate(bh)) {
+               printk (KERN_ERR "JBD: Failed to read block at offset %u\n",
+                       offset);
+               brelse(bh);
+               return -EIO;
+       }
+
+       *bhp = bh;
+       return 0;
+}
+
+
+/*
+ * Count the number of in-use tags in a journal descriptor block.
+ */
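+/*
+ * Worked numbers (illustrative, assuming the usual on-disk layout):
+ * journal_header_t is 12 bytes and a 32-bit journal tag is 8 bytes (12
+ * with JBD2_FEATURE_INCOMPAT_64BIT), so a 4096-byte descriptor block
+ * holds at most (4096 - 12) / 8 = 510 tags when every tag carries
+ * JBD2_FLAG_SAME_UUID; each tag without it consumes a further 16 bytes
+ * of UUID.
+ */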
+
+static int count_tags(journal_t *journal, struct buffer_head *bh)
+{
+       char *                  tagp;
+       journal_block_tag_t *   tag;
+       int                     nr = 0, size = journal->j_blocksize;
+       int                     tag_bytes = journal_tag_bytes(journal);
+
+       tagp = &bh->b_data[sizeof(journal_header_t)];
+
+       while ((tagp - bh->b_data + tag_bytes) <= size) {
+               tag = (journal_block_tag_t *) tagp;
+
+               nr++;
+               tagp += tag_bytes;
+               if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID)))
+                       tagp += 16;
+
+               if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG))
+                       break;
+       }
+
+       return nr;
+}
+
+
+/* Make sure we wrap around the log correctly! */
+#define wrap(journal, var)                                             \
+do {                                                                   \
+       if (var >= (journal)->j_last)                                   \
+               var -= ((journal)->j_last - (journal)->j_first);        \
+} while (0)
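+
+/*
+ * Example (illustrative): with j_first == 1 and j_last == 1024, log
+ * offset 1024 wraps to 1024 - (1024 - 1) = 1, i.e. back to j_first.
+ */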
+
+/**
+ * jbd2_journal_recover - recovers an on-disk journal
+ * @journal: the journal to recover
+ *
+ * The primary function for recovering the log contents when mounting a
+ * journaled device.
+ *
+ * Recovery is done in three passes.  In the first pass, we look for the
+ * end of the log.  In the second, we assemble the list of revoke
+ * blocks.  In the third and final pass, we replay any un-revoked blocks
+ * in the log.
+ */
+int jbd2_journal_recover(journal_t *journal)
+{
+       int                     err;
+       journal_superblock_t *  sb;
+
+       struct recovery_info    info;
+
+       memset(&info, 0, sizeof(info));
+       sb = journal->j_superblock;
+
+       /*
+        * The journal superblock's s_start field (the current log head)
+        * is always zero if, and only if, the journal was cleanly
+        * unmounted.
+        */
+
+       if (!sb->s_start) {
+               jbd_debug(1, "No recovery required, last transaction %d\n",
+                         be32_to_cpu(sb->s_sequence));
+               journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
+               return 0;
+       }
+
+       err = do_one_pass(journal, &info, PASS_SCAN);
+       if (!err)
+               err = do_one_pass(journal, &info, PASS_REVOKE);
+       if (!err)
+               err = do_one_pass(journal, &info, PASS_REPLAY);
+
+       jbd_debug(0, "JBD: recovery, exit status %d, "
+                 "recovered transactions %u to %u\n",
+                 err, info.start_transaction, info.end_transaction);
+       jbd_debug(0, "JBD: Replayed %d and revoked %d/%d blocks\n",
+                 info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
+
+       /* Restart the log at the next transaction ID, thus invalidating
+        * any existing commit records in the log. */
+       journal->j_transaction_sequence = ++info.end_transaction;
+
+       jbd2_journal_clear_revoke(journal);
+       sync_blockdev(journal->j_fs_dev);
+       return err;
+}
+
+/**
+ * jbd2_journal_skip_recovery - Start journal and wipe existing records
+ * @journal: journal to startup
+ *
+ * Locate any valid recovery information from the journal and set up the
+ * journal structures in memory to ignore it (presumably because the
+ * caller has evidence that it is out of date).
+ * This function doesn't appear to be exported.
+ *
+ * We perform one pass over the journal to allow us to tell the user how
+ * much recovery information is being erased, and to let us initialise
+ * the journal transaction sequence numbers to the next unused ID.
+ */
+int jbd2_journal_skip_recovery(journal_t *journal)
+{
+       int                     err;
+       journal_superblock_t *  sb;
+
+       struct recovery_info    info;
+
+       memset (&info, 0, sizeof(info));
+       sb = journal->j_superblock;
+
+       err = do_one_pass(journal, &info, PASS_SCAN);
+
+       if (err) {
+               printk(KERN_ERR "JBD: error %d scanning journal\n", err);
+               ++journal->j_transaction_sequence;
+       } else {
+#ifdef CONFIG_JBD_DEBUG
+               int dropped = info.end_transaction - be32_to_cpu(sb->s_sequence);
+#endif
+               jbd_debug(0,
+                         "JBD: ignoring %d transaction%s from the journal.\n",
+                         dropped, (dropped == 1) ? "" : "s");
+               journal->j_transaction_sequence = ++info.end_transaction;
+       }
+
+       journal->j_tail = 0;
+       return err;
+}
+
+static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
+{
+       unsigned long long block = be32_to_cpu(tag->t_blocknr);
+       if (tag_bytes > JBD_TAG_SIZE32)
+               block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
+       return block;
+}
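+
+/*
+ * Example (illustrative): on a journal with JBD2_FEATURE_INCOMPAT_64BIT
+ * (tag_bytes > JBD_TAG_SIZE32), t_blocknr == 0x00000001 and
+ * t_blocknr_high == 0x00000002 yield block 0x0000000200000001.
+ */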
+
+static int do_one_pass(journal_t *journal,
+                       struct recovery_info *info, enum passtype pass)
+{
+       unsigned int            first_commit_ID, next_commit_ID;
+       unsigned long           next_log_block;
+       int                     err, success = 0;
+       journal_superblock_t *  sb;
+       journal_header_t *      tmp;
+       struct buffer_head *    bh;
+       unsigned int            sequence;
+       int                     blocktype;
+       int                     tag_bytes = journal_tag_bytes(journal);
+
+       /* Precompute the maximum number of tags in a descriptor block */
+       int                     MAX_BLOCKS_PER_DESC;
+       MAX_BLOCKS_PER_DESC = ((journal->j_blocksize-sizeof(journal_header_t))
+                              / tag_bytes);
+
+       /*
+        * First thing is to establish what we expect to find in the log
+        * (in terms of transaction IDs), and where (in terms of log
+        * block offsets): query the superblock.
+        */
+
+       sb = journal->j_superblock;
+       next_commit_ID = be32_to_cpu(sb->s_sequence);
+       next_log_block = be32_to_cpu(sb->s_start);
+
+       first_commit_ID = next_commit_ID;
+       if (pass == PASS_SCAN)
+               info->start_transaction = first_commit_ID;
+
+       jbd_debug(1, "Starting recovery pass %d\n", pass);
+
+       /*
+        * Now we walk through the log, transaction by transaction,
+        * making sure that each transaction has a commit block in the
+        * expected place.  Each complete transaction gets replayed back
+        * into the main filesystem.
+        */
+
+       while (1) {
+               int                     flags;
+               char *                  tagp;
+               journal_block_tag_t *   tag;
+               struct buffer_head *    obh;
+               struct buffer_head *    nbh;
+
+               cond_resched();         /* We're under lock_kernel() */
+
+               /* If we already know where to stop the log traversal,
+                * check right now that we haven't gone past the end of
+                * the log. */
+
+               if (pass != PASS_SCAN)
+                       if (tid_geq(next_commit_ID, info->end_transaction))
+                               break;
+
+               jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n",
+                         next_commit_ID, next_log_block, journal->j_last);
+
+               /* Skip over each chunk of the transaction looking for
+                * either the next descriptor block or the final commit
+                * record. */
+
+               jbd_debug(3, "JBD: checking block %ld\n", next_log_block);
+               err = jread(&bh, journal, next_log_block);
+               if (err)
+                       goto failed;
+
+               next_log_block++;
+               wrap(journal, next_log_block);
+
+               /* What kind of buffer is it?
+                *
+                * If it is a descriptor block, check that it has the
+                * expected sequence number.  Otherwise, we're all done
+                * here. */
+
+               tmp = (journal_header_t *)bh->b_data;
+
+               if (tmp->h_magic != cpu_to_be32(JBD2_MAGIC_NUMBER)) {
+                       brelse(bh);
+                       break;
+               }
+
+               blocktype = be32_to_cpu(tmp->h_blocktype);
+               sequence = be32_to_cpu(tmp->h_sequence);
+               jbd_debug(3, "Found magic %d, sequence %d\n",
+                         blocktype, sequence);
+
+               if (sequence != next_commit_ID) {
+                       brelse(bh);
+                       break;
+               }
+
+               /* OK, we have a valid descriptor block which matches
+                * all of the sequence number checks.  What are we going
+                * to do with it?  That depends on the pass... */
+
+               switch(blocktype) {
+               case JBD2_DESCRIPTOR_BLOCK:
+                       /* If it is a valid descriptor block, replay it
+                        * in pass REPLAY; otherwise, just skip over the
+                        * blocks it describes. */
+                       if (pass != PASS_REPLAY) {
+                               next_log_block += count_tags(journal, bh);
+                               wrap(journal, next_log_block);
+                               brelse(bh);
+                               continue;
+                       }
+
+                       /* A descriptor block: we can now write all of
+                        * the data blocks.  Yay, useful work is finally
+                        * getting done here! */
+
+                       tagp = &bh->b_data[sizeof(journal_header_t)];
+                       while ((tagp - bh->b_data + tag_bytes)
+                              <= journal->j_blocksize) {
+                               unsigned long io_block;
+
+                               tag = (journal_block_tag_t *) tagp;
+                               flags = be32_to_cpu(tag->t_flags);
+
+                               io_block = next_log_block++;
+                               wrap(journal, next_log_block);
+                               err = jread(&obh, journal, io_block);
+                               if (err) {
+                                       /* Recover what we can, but
+                                        * report failure at the end. */
+                                       success = err;
+                                       printk (KERN_ERR
+                                               "JBD: IO error %d recovering "
+                                               "block %ld in log\n",
+                                               err, io_block);
+                               } else {
+                                       unsigned long long blocknr;
+
+                                       J_ASSERT(obh != NULL);
+                                       blocknr = read_tag_block(tag_bytes,
+                                                                tag);
+
+                                       /* If the block has been
+                                        * revoked, then we're all done
+                                        * here. */
+                                       if (jbd2_journal_test_revoke
+                                           (journal, blocknr,
+                                            next_commit_ID)) {
+                                               brelse(obh);
+                                               ++info->nr_revoke_hits;
+                                               goto skip_write;
+                                       }
+
+                                       /* Find a buffer for the new
+                                        * data being restored */
+                                       nbh = __getblk(journal->j_fs_dev,
+                                                       blocknr,
+                                                       journal->j_blocksize);
+                                       if (nbh == NULL) {
+                                               printk(KERN_ERR
+                                                      "JBD: Out of memory "
+                                                      "during recovery.\n");
+                                               err = -ENOMEM;
+                                               brelse(bh);
+                                               brelse(obh);
+                                               goto failed;
+                                       }
+
+                                       lock_buffer(nbh);
+                                       memcpy(nbh->b_data, obh->b_data,
+                                                       journal->j_blocksize);
+                                       if (flags & JBD2_FLAG_ESCAPE) {
+                                               /* Un-escape: restore the
+                                                * magic into the replayed
+                                                * buffer (nbh), not the
+                                                * descriptor block bh. */
+                                               *((__be32 *)nbh->b_data) =
+                                               cpu_to_be32(JBD2_MAGIC_NUMBER);
+                                       }
+
+                                       BUFFER_TRACE(nbh, "marking dirty");
+                                       set_buffer_uptodate(nbh);
+                                       mark_buffer_dirty(nbh);
+                                       BUFFER_TRACE(nbh, "marking uptodate");
+                                       ++info->nr_replays;
+                                       /* ll_rw_block(WRITE, 1, &nbh); */
+                                       unlock_buffer(nbh);
+                                       brelse(obh);
+                                       brelse(nbh);
+                               }
+
+                       skip_write:
+                               tagp += tag_bytes;
+                               if (!(flags & JBD2_FLAG_SAME_UUID))
+                                       tagp += 16;
+
+                               if (flags & JBD2_FLAG_LAST_TAG)
+                                       break;
+                       }
+
+                       brelse(bh);
+                       continue;
+
+               case JBD2_COMMIT_BLOCK:
+                       /* Found an expected commit block: not much to
+                        * do other than move on to the next sequence
+                        * number. */
+                       brelse(bh);
+                       next_commit_ID++;
+                       continue;
+
+               case JBD2_REVOKE_BLOCK:
+                       /* If we aren't in the REVOKE pass, then we can
+                        * just skip over this block. */
+                       if (pass != PASS_REVOKE) {
+                               brelse(bh);
+                               continue;
+                       }
+
+                       err = scan_revoke_records(journal, bh,
+                                                 next_commit_ID, info);
+                       brelse(bh);
+                       if (err)
+                               goto failed;
+                       continue;
+
+               default:
+                       jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
+                                 blocktype);
+                       brelse(bh);
+                       goto done;
+               }
+       }
+
+ done:
+       /*
+        * We broke out of the log scan loop: either we came to the
+        * known end of the log or we found an unexpected block in the
+        * log.  If the latter happened, then we know that the "current"
+        * transaction marks the end of the valid log.
+        */
+
+       if (pass == PASS_SCAN)
+               info->end_transaction = next_commit_ID;
+       else {
+               /* It's really bad news if different passes end up at
+                * different places (but possible due to IO errors). */
+               if (info->end_transaction != next_commit_ID) {
+                       printk (KERN_ERR "JBD: recovery pass %d ended at "
+                               "transaction %u, expected %u\n",
+                               pass, next_commit_ID, info->end_transaction);
+                       if (!success)
+                               success = -EIO;
+               }
+       }
+
+       return success;
+
+ failed:
+       return err;
+}
+
+
+/* Scan a revoke record, marking all blocks mentioned as revoked. */
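+/*
+ * Layout sketch (illustrative): r_count is the number of bytes used in
+ * the block, including the 16-byte jbd2_journal_revoke_header_t.  On a
+ * 64-bit journal, three revoked blocks give r_count = 16 + 3 * 8 = 40,
+ * and the loop below reads records at offsets 16, 24 and 32.
+ */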
+
+static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
+                              tid_t sequence, struct recovery_info *info)
+{
+       jbd2_journal_revoke_header_t *header;
+       int offset, max;
+       int record_len = 4;
+
+       header = (jbd2_journal_revoke_header_t *) bh->b_data;
+       offset = sizeof(jbd2_journal_revoke_header_t);
+       max = be32_to_cpu(header->r_count);
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+               record_len = 8;
+
+       while (offset + record_len <= max) {
+               unsigned long long blocknr;
+               int err;
+
+               if (record_len == 4)
+                       blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
+               else
+                       blocknr = be64_to_cpu(* ((__be64 *) (bh->b_data+offset)));
+               offset += record_len;
+               err = jbd2_journal_set_revoke(journal, blocknr, sequence);
+               if (err)
+                       return err;
+               ++info->nr_revokes;
+       }
+       return 0;
+}
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
new file mode 100644 (file)
index 0000000..380d199
--- /dev/null
@@ -0,0 +1,712 @@
+/*
+ * linux/fs/revoke.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
+ *
+ * Copyright 2000 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Journal revoke routines for the generic filesystem journaling code;
+ * part of the ext2fs journaling system.
+ *
+ * Revoke is the mechanism used to prevent old log records for deleted
+ * metadata from being replayed on top of newer data using the same
+ * blocks.  The revoke mechanism is used in two separate places:
+ *
+ * + Commit: during commit we write the entire list of the current
+ *   transaction's revoked blocks to the journal
+ *
+ * + Recovery: during recovery we record the transaction ID of all
+ *   revoked blocks.  If there are multiple revoke records in the log
+ *   for a single block, only the last one counts, and if there is a log
+ *   entry for a block beyond the last revoke, then that log entry still
+ *   gets replayed.
+ *
+ * We can get interactions between revokes and new log data within a
+ * single transaction:
+ *
+ * Block is revoked and then journaled:
+ *   The desired end result is the journaling of the new block, so we
+ *   cancel the revoke before the transaction commits.
+ *
+ * Block is journaled and then revoked:
+ *   The revoke must take precedence over the write of the block, so we
+ *   need either to cancel the journal entry or to write the revoke
+ *   later in the log than the log block.  In this case, we choose the
+ *   latter: journaling a block cancels any revoke record for that block
+ *   in the current transaction, so any revoke for that block in the
+ *   transaction must have happened after the block was journaled and so
+ *   the revoke must take precedence.
+ *
+ * Block is revoked and then written as data:
+ *   The data write is allowed to succeed, but the revoke is _not_
+ *   cancelled.  We still need to prevent old log records from
+ *   overwriting the new data.  We don't even need to clear the revoke
+ *   bit here.
+ *
+ * Revoke information on buffers is a tri-state value:
+ *
+ * RevokeValid clear:  no cached revoke status, need to look it up
+ * RevokeValid set, Revoked clear:
+ *                     buffer has not been revoked, and cancel_revoke
+ *                     need do nothing.
+ * RevokeValid set, Revoked set:
+ *                     buffer has been revoked.
+ */
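+
+/*
+ * Worked scenario (from the rules above): block B is revoked and then
+ * re-journaled inside the same transaction.  Journaling B calls
+ * jbd2_journal_cancel_revoke(), which deletes the pending revoke record,
+ * so commit writes no revoke for B and recovery replays B's new contents.
+ */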
+
+#ifndef __KERNEL__
+#include "jfs_user.h"
+#else
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#endif
+
+static kmem_cache_t *jbd2_revoke_record_cache;
+static kmem_cache_t *jbd2_revoke_table_cache;
+
+/* Each revoke record represents one single revoked block.  During
+   journal replay, this involves recording the transaction ID of the
+   last transaction to revoke this block. */
+
+struct jbd2_revoke_record_s
+{
+       struct list_head  hash;
+       tid_t             sequence;     /* Used for recovery only */
+       unsigned long long        blocknr;
+};
+
+
+/* The revoke table is just a simple hash table of revoke records. */
+struct jbd2_revoke_table_s
+{
+       /* It is conceivable that we might want a larger hash table
+        * for recovery.  Must be a power of two. */
+       int               hash_size;
+       int               hash_shift;
+       struct list_head *hash_table;
+};
+
+
+#ifdef __KERNEL__
+static void write_one_revoke_record(journal_t *, transaction_t *,
+                                   struct journal_head **, int *,
+                                   struct jbd2_revoke_record_s *);
+static void flush_descriptor(journal_t *, struct journal_head *, int);
+#endif
+
+/* Utility functions to maintain the revoke table */
+
+/* Borrowed from buffer.c: this is a tried and tested block hash function */
+static inline int hash(journal_t *journal, unsigned long long block)
+{
+       struct jbd2_revoke_table_s *table = journal->j_revoke;
+       int hash_shift = table->hash_shift;
+       int hash = (int)block ^ (int)((block >> 31) >> 1);
+
+       return ((hash << (hash_shift - 6)) ^
+               (hash >> 13) ^
+               (hash << (hash_shift - 12))) & (table->hash_size - 1);
+}
+
+static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
+                             tid_t seq)
+{
+       struct list_head *hash_list;
+       struct jbd2_revoke_record_s *record;
+
+repeat:
+       record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
+       if (!record)
+               goto oom;
+
+       record->sequence = seq;
+       record->blocknr = blocknr;
+       hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
+       spin_lock(&journal->j_revoke_lock);
+       list_add(&record->hash, hash_list);
+       spin_unlock(&journal->j_revoke_lock);
+       return 0;
+
+oom:
+       if (!journal_oom_retry)
+               return -ENOMEM;
+       jbd_debug(1, "ENOMEM in %s, retrying\n", __FUNCTION__);
+       yield();
+       goto repeat;
+}
+
+/* Find a revoke record in the journal's hash table. */
+
+static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
+                                                     unsigned long long blocknr)
+{
+       struct list_head *hash_list;
+       struct jbd2_revoke_record_s *record;
+
+       hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
+
+       spin_lock(&journal->j_revoke_lock);
+       record = (struct jbd2_revoke_record_s *) hash_list->next;
+       while (&(record->hash) != hash_list) {
+               if (record->blocknr == blocknr) {
+                       spin_unlock(&journal->j_revoke_lock);
+                       return record;
+               }
+               record = (struct jbd2_revoke_record_s *) record->hash.next;
+       }
+       spin_unlock(&journal->j_revoke_lock);
+       return NULL;
+}
+
+int __init jbd2_journal_init_revoke_caches(void)
+{
+       jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record",
+                                          sizeof(struct jbd2_revoke_record_s),
+                                          0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+       if (jbd2_revoke_record_cache == 0)
+               return -ENOMEM;
+
+       jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table",
+                                          sizeof(struct jbd2_revoke_table_s),
+                                          0, 0, NULL, NULL);
+       if (jbd2_revoke_table_cache == 0) {
+               kmem_cache_destroy(jbd2_revoke_record_cache);
+               jbd2_revoke_record_cache = NULL;
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void jbd2_journal_destroy_revoke_caches(void)
+{
+       kmem_cache_destroy(jbd2_revoke_record_cache);
+       jbd2_revoke_record_cache = NULL;
+       kmem_cache_destroy(jbd2_revoke_table_cache);
+       jbd2_revoke_table_cache = NULL;
+}
+
+/* Initialise the revoke table for a given journal to a given size. */
+
+int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
+{
+       int shift, tmp;
+
+       J_ASSERT (journal->j_revoke_table[0] == NULL);
+
+       shift = 0;
+       tmp = hash_size;
+       while((tmp >>= 1UL) != 0UL)
+               shift++;
+
+       journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
+       if (!journal->j_revoke_table[0])
+               return -ENOMEM;
+       journal->j_revoke = journal->j_revoke_table[0];
+
+       /* Check that the hash_size is a power of two */
+       J_ASSERT ((hash_size & (hash_size-1)) == 0);
+
+       journal->j_revoke->hash_size = hash_size;
+
+       journal->j_revoke->hash_shift = shift;
+
+       journal->j_revoke->hash_table =
+               kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
+       if (!journal->j_revoke->hash_table) {
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
+               journal->j_revoke = NULL;
+               return -ENOMEM;
+       }
+
+       for (tmp = 0; tmp < hash_size; tmp++)
+               INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
+
+       journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
+       if (!journal->j_revoke_table[1]) {
+               kfree(journal->j_revoke_table[0]->hash_table);
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
+               return -ENOMEM;
+       }
+
+       journal->j_revoke = journal->j_revoke_table[1];
+
+       /* Check that the hash_size is a power of two */
+       J_ASSERT ((hash_size & (hash_size-1)) == 0);
+
+       journal->j_revoke->hash_size = hash_size;
+
+       journal->j_revoke->hash_shift = shift;
+
+       journal->j_revoke->hash_table =
+               kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
+       if (!journal->j_revoke->hash_table) {
+               kfree(journal->j_revoke_table[0]->hash_table);
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
+               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
+               journal->j_revoke = NULL;
+               return -ENOMEM;
+       }
+
+       for (tmp = 0; tmp < hash_size; tmp++)
+               INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
+
+       spin_lock_init(&journal->j_revoke_lock);
+
+       return 0;
+}
+
+/* Destroy a journal's revoke table.  The table must already be empty! */
+
+void jbd2_journal_destroy_revoke(journal_t *journal)
+{
+       struct jbd2_revoke_table_s *table;
+       struct list_head *hash_list;
+       int i;
+
+       table = journal->j_revoke_table[0];
+       if (!table)
+               return;
+
+       for (i=0; i<table->hash_size; i++) {
+               hash_list = &table->hash_table[i];
+               J_ASSERT (list_empty(hash_list));
+       }
+
+       kfree(table->hash_table);
+       kmem_cache_free(jbd2_revoke_table_cache, table);
+       journal->j_revoke = NULL;
+
+       table = journal->j_revoke_table[1];
+       if (!table)
+               return;
+
+       for (i=0; i<table->hash_size; i++) {
+               hash_list = &table->hash_table[i];
+               J_ASSERT (list_empty(hash_list));
+       }
+
+       kfree(table->hash_table);
+       kmem_cache_free(jbd2_revoke_table_cache, table);
+       journal->j_revoke = NULL;
+}
+
+
+#ifdef __KERNEL__
+
+/*
+ * jbd2_journal_revoke: revoke a given buffer_head from the journal.  This
+ * prevents the block from being replayed during recovery if we take a
+ * crash after this current transaction commits.  Any subsequent
+ * metadata writes of the buffer in this transaction cancel the
+ * revoke.
+ *
+ * Note that this call may block --- it is up to the caller to make
+ * sure that there are no further calls to journal_write_metadata
+ * before the revoke is complete.  In ext3, this implies calling the
+ * revoke before clearing the block bitmap when we are deleting
+ * metadata.
+ *
+ * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
+ * parameter, but does _not_ forget the buffer_head if the bh was only
+ * found implicitly.
+ *
+ * bh_in may not be a journalled buffer - it may have come off
+ * the hash tables without an attached journal_head.
+ *
+ * If bh_in is non-NULL, jbd2_journal_revoke() will decrement its b_count
+ * by one.
+ */
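+
+/*
+ * Illustrative call site (hedged; ext4's forget path, when freeing a
+ * metadata block, does roughly this):
+ *
+ *     err = jbd2_journal_revoke(handle, blocknr, bh);
+ */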
+
+int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
+                  struct buffer_head *bh_in)
+{
+       struct buffer_head *bh = NULL;
+       journal_t *journal;
+       struct block_device *bdev;
+       int err;
+
+       might_sleep();
+       if (bh_in)
+               BUFFER_TRACE(bh_in, "enter");
+
+       journal = handle->h_transaction->t_journal;
+       if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
+               J_ASSERT (!"Cannot set revoke feature!");
+               return -EINVAL;
+       }
+
+       bdev = journal->j_fs_dev;
+       bh = bh_in;
+
+       if (!bh) {
+               bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
+               if (bh)
+                       BUFFER_TRACE(bh, "found on hash");
+       }
+#ifdef JBD_EXPENSIVE_CHECKING
+       else {
+               struct buffer_head *bh2;
+
+               /* If there is a different buffer_head lying around in
+                * memory anywhere... */
+               bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
+               if (bh2) {
+                       /* ... and it has RevokeValid status... */
+                       if (bh2 != bh && buffer_revokevalid(bh2))
+                               /* ...then it better be revoked too,
+                                * since it's illegal to create a revoke
+                                * record against a buffer_head which is
+                                * not marked revoked --- that would
+                                * risk missing a subsequent revoke
+                                * cancel. */
+                               J_ASSERT_BH(bh2, buffer_revoked(bh2));
+                       put_bh(bh2);
+               }
+       }
+#endif
+
+       /* We really ought not ever to revoke twice in a row without
+           first having the revoke cancelled: it's illegal to free a
+           block twice without allocating it in between! */
+       if (bh) {
+               if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
+                                "inconsistent data on disk")) {
+                       if (!bh_in)
+                               brelse(bh);
+                       return -EIO;
+               }
+               set_buffer_revoked(bh);
+               set_buffer_revokevalid(bh);
+               if (bh_in) {
+                       BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
+                       jbd2_journal_forget(handle, bh_in);
+               } else {
+                       BUFFER_TRACE(bh, "call brelse");
+                       __brelse(bh);
+               }
+       }
+
+       jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n", blocknr, bh_in);
+       err = insert_revoke_hash(journal, blocknr,
+                               handle->h_transaction->t_tid);
+       BUFFER_TRACE(bh_in, "exit");
+       return err;
+}
+
+/*
+ * Cancel an outstanding revoke.  For use only internally by the
+ * journaling code (called from jbd2_journal_get_write_access).
+ *
+ * We trust buffer_revoked() on the buffer if the buffer is already
+ * being journaled: if there is no revoke pending on the buffer, then we
+ * don't do anything here.
+ *
+ * This would break if it were possible for a buffer to be revoked and
+ * discarded, and then reallocated within the same transaction.  In such
+ * a case we would have lost the revoked bit, but when we arrived here
+ * the second time we would still have a pending revoke to cancel.  So,
+ * do not trust the Revoked bit on buffers unless RevokeValid is also
+ * set.
+ *
+ * The caller must have the journal locked.
+ */
+int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
+{
+       struct jbd2_revoke_record_s *record;
+       journal_t *journal = handle->h_transaction->t_journal;
+       int need_cancel;
+       int did_revoke = 0;     /* akpm: debug */
+       struct buffer_head *bh = jh2bh(jh);
+
+       jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
+
+       /* Is the existing Revoke bit valid?  If so, we trust it, and
+        * only perform the full cancel if the revoke bit is set.  If
+        * not, we can't trust the revoke bit, and we need to do the
+        * full search for a revoke record. */
+       if (test_set_buffer_revokevalid(bh)) {
+               need_cancel = test_clear_buffer_revoked(bh);
+       } else {
+               need_cancel = 1;
+               clear_buffer_revoked(bh);
+       }
+
+       if (need_cancel) {
+               record = find_revoke_record(journal, bh->b_blocknr);
+               if (record) {
+                       jbd_debug(4, "cancelled existing revoke on "
+                                 "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
+                       spin_lock(&journal->j_revoke_lock);
+                       list_del(&record->hash);
+                       spin_unlock(&journal->j_revoke_lock);
+                       kmem_cache_free(jbd2_revoke_record_cache, record);
+                       did_revoke = 1;
+               }
+       }
+
+#ifdef JBD_EXPENSIVE_CHECKING
+       /* There better not be one left behind by now! */
+       record = find_revoke_record(journal, bh->b_blocknr);
+       J_ASSERT_JH(jh, record == NULL);
+#endif
+
+       /* Finally, have we just cleared revoke on an unhashed
+        * buffer_head?  If so, we'd better make sure we clear the
+        * revoked status on any hashed alias too, otherwise the revoke
+        * state machine will get very upset later on. */
+       if (need_cancel) {
+               struct buffer_head *bh2;
+               bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
+               if (bh2) {
+                       if (bh2 != bh)
+                               clear_buffer_revoked(bh2);
+                       __brelse(bh2);
+               }
+       }
+       return did_revoke;
+}
+
+/*
+ * jbd2_journal_switch_revoke_table: select j_revoke for the next
+ * transaction.  We do not want to suspend any processing until all
+ * revokes are written. -bzzz
+ */
+void jbd2_journal_switch_revoke_table(journal_t *journal)
+{
+       int i;
+
+       if (journal->j_revoke == journal->j_revoke_table[0])
+               journal->j_revoke = journal->j_revoke_table[1];
+       else
+               journal->j_revoke = journal->j_revoke_table[0];
+
+       for (i = 0; i < journal->j_revoke->hash_size; i++)
+               INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
+}
+
+/*
+ * Write revoke records to the journal for all entries in the current
+ * revoke hash, deleting the entries as we go.
+ *
+ * Called with the journal lock held.
+ */
+
+void jbd2_journal_write_revoke_records(journal_t *journal,
+                                 transaction_t *transaction)
+{
+       struct journal_head *descriptor;
+       struct jbd2_revoke_record_s *record;
+       struct jbd2_revoke_table_s *revoke;
+       struct list_head *hash_list;
+       int i, offset, count;
+
+       descriptor = NULL;
+       offset = 0;
+       count = 0;
+
+       /* select revoke table for committing transaction */
+       revoke = journal->j_revoke == journal->j_revoke_table[0] ?
+               journal->j_revoke_table[1] : journal->j_revoke_table[0];
+
+       for (i = 0; i < revoke->hash_size; i++) {
+               hash_list = &revoke->hash_table[i];
+
+               while (!list_empty(hash_list)) {
+                       record = (struct jbd2_revoke_record_s *)
+                               hash_list->next;
+                       write_one_revoke_record(journal, transaction,
+                                               &descriptor, &offset,
+                                               record);
+                       count++;
+                       list_del(&record->hash);
+                       kmem_cache_free(jbd2_revoke_record_cache, record);
+               }
+       }
+       if (descriptor)
+               flush_descriptor(journal, descriptor, offset);
+       jbd_debug(1, "Wrote %d revoke records\n", count);
+}
+
+/*
+ * Write out one revoke record.  We need to create a new descriptor
+ * block if the old one is full or if we have not already created one.
+ */
+
+static void write_one_revoke_record(journal_t *journal,
+                                   transaction_t *transaction,
+                                   struct journal_head **descriptorp,
+                                   int *offsetp,
+                                   struct jbd2_revoke_record_s *record)
+{
+       struct journal_head *descriptor;
+       int offset;
+       journal_header_t *header;
+
+       /* If we are already aborting, this all becomes a noop.  We
+           still need to go round the loop in
+           jbd2_journal_write_revoke_records in order to free all of the
+           revoke records: only the IO to the journal is omitted. */
+       if (is_journal_aborted(journal))
+               return;
+
+       descriptor = *descriptorp;
+       offset = *offsetp;
+
+       /* Make sure we have a descriptor with space left for the record */
+       if (descriptor) {
+               if (offset == journal->j_blocksize) {
+                       flush_descriptor(journal, descriptor, offset);
+                       descriptor = NULL;
+               }
+       }
+
+       if (!descriptor) {
+               descriptor = jbd2_journal_get_descriptor_buffer(journal);
+               if (!descriptor)
+                       return;
+               header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
+               header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
+               header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
+               header->h_sequence  = cpu_to_be32(transaction->t_tid);
+
+               /* Record it so that we can wait for IO completion later */
+               JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
+               jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
+
+               offset = sizeof(jbd2_journal_revoke_header_t);
+               *descriptorp = descriptor;
+       }
+
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
+               * ((__be64 *)(&jh2bh(descriptor)->b_data[offset])) =
+                       cpu_to_be64(record->blocknr);
+               offset += 8;
+
+       } else {
+               * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
+                       cpu_to_be32(record->blocknr);
+               offset += 4;
+       }
+
+       *offsetp = offset;
+}
+
+/*
+ * Flush a revoke descriptor out to the journal.  If we are aborting,
+ * this is a noop; otherwise we are generating a buffer which needs to
+ * be waited for during commit, so it has to go onto the appropriate
+ * journal buffer list.
+ */
+
+static void flush_descriptor(journal_t *journal,
+                            struct journal_head *descriptor,
+                            int offset)
+{
+       jbd2_journal_revoke_header_t *header;
+       struct buffer_head *bh = jh2bh(descriptor);
+
+       if (is_journal_aborted(journal)) {
+               put_bh(bh);
+               return;
+       }
+
+       header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
+       header->r_count = cpu_to_be32(offset);
+       set_buffer_jwrite(bh);
+       BUFFER_TRACE(bh, "write");
+       set_buffer_dirty(bh);
+       ll_rw_block(SWRITE, 1, &bh);
+}
+#endif
+
+/*
+ * Revoke support for recovery.
+ *
+ * Recovery needs to be able to:
+ *
+ *  record all revoke records, including the tid of the latest instance
+ *  of each revoke in the journal
+ *
+ *  check whether a given block in a given transaction should be replayed
+ *  (ie. has not been revoked by a revoke record in that or a subsequent
+ *  transaction)
+ *
+ *  empty the revoke table after recovery.
+ */
+
+/*
+ * First, setting revoke records.  We create a new revoke record for
+ * every block ever revoked in the log as we scan it for recovery, and
+ * we update the existing records if we find multiple revokes for a
+ * single block.
+ */
+
+int jbd2_journal_set_revoke(journal_t *journal,
+                      unsigned long long blocknr,
+                      tid_t sequence)
+{
+       struct jbd2_revoke_record_s *record;
+
+       record = find_revoke_record(journal, blocknr);
+       if (record) {
+               /* If we have multiple occurrences, only record the
+                * latest sequence number in the hashed record */
+               if (tid_gt(sequence, record->sequence))
+                       record->sequence = sequence;
+               return 0;
+       }
+       return insert_revoke_hash(journal, blocknr, sequence);
+}
+
+/*
+ * Test revoke records.  For a given block referenced in the log, has
+ * that block been revoked?  A revoke record with a given transaction
+ * sequence number revokes all blocks in that transaction and earlier
+ * ones, but later transactions still need to be replayed.
+ */
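+
+/*
+ * Example (illustrative): if the hash holds a record with sequence 12
+ * for a block, jbd2_journal_test_revoke() returns 1 for a log entry in
+ * transaction 10 (skip the replay) and 0 for one in transaction 15
+ * (replay it).
+ */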
+
+int jbd2_journal_test_revoke(journal_t *journal,
+                       unsigned long long blocknr,
+                       tid_t sequence)
+{
+       struct jbd2_revoke_record_s *record;
+
+       record = find_revoke_record(journal, blocknr);
+       if (!record)
+               return 0;
+       if (tid_gt(sequence, record->sequence))
+               return 0;
+       return 1;
+}
+
+/*
+ * Finally, once recovery is over, we need to clear the revoke table so
+ * that it can be reused by the running filesystem.
+ */
+
+void jbd2_journal_clear_revoke(journal_t *journal)
+{
+       int i;
+       struct list_head *hash_list;
+       struct jbd2_revoke_record_s *record;
+       struct jbd2_revoke_table_s *revoke;
+
+       revoke = journal->j_revoke;
+
+       for (i = 0; i < revoke->hash_size; i++) {
+               hash_list = &revoke->hash_table[i];
+               while (!list_empty(hash_list)) {
+                       record = (struct jbd2_revoke_record_s*) hash_list->next;
+                       list_del(&record->hash);
+                       kmem_cache_free(jbd2_revoke_record_cache, record);
+               }
+       }
+}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
new file mode 100644 (file)
index 0000000..149957b
--- /dev/null
@@ -0,0 +1,2080 @@
+/*
+ * linux/fs/transaction.c
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
+ *
+ * Copyright 1998 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Generic filesystem transaction handling code; part of the ext2fs
+ * journaling system.
+ *
+ * This file manages transactions (compound commits managed by the
+ * journaling code) and handles (individual atomic operations by the
+ * filesystem).
+ */
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/smp_lock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+/*
+ * jbd2_get_transaction: obtain a new transaction_t object.
+ *
+ * Simply allocate and initialise a new transaction.  Create it in
+ * RUNNING state and add it to the current journal (which should not
+ * have an existing running transaction: we only make a new transaction
+ * once we have started to commit the old one).
+ *
+ * Preconditions:
+ *     The journal MUST be locked.  We don't perform atomic mallocs on the
+ *     new transaction and we can't block without protecting against other
+ *     processes trying to touch the journal while it is in transition.
+ *
+ * Called under j_state_lock
+ */
+
+static transaction_t *
+jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
+{
+       transaction->t_journal = journal;
+       transaction->t_state = T_RUNNING;
+       transaction->t_tid = journal->j_transaction_sequence++;
+       transaction->t_expires = jiffies + journal->j_commit_interval;
+       spin_lock_init(&transaction->t_handle_lock);
+
+       /* Set up the commit timer for the new transaction. */
+       journal->j_commit_timer.expires = transaction->t_expires;
+       add_timer(&journal->j_commit_timer);
+
+       J_ASSERT(journal->j_running_transaction == NULL);
+       journal->j_running_transaction = transaction;
+
+       return transaction;
+}
+
+/*
+ * Handle management.
+ *
+ * A handle_t is an object which represents a single atomic update to a
+ * filesystem, and which tracks all of the modifications which form part
+ * of that one update.
+ */
+
+/*
+ * start_this_handle: Given a handle, deal with any locking or stalling
+ * needed to make sure that there is enough journal space for the handle
+ * to begin.  Attach the handle to a transaction and set up the
+ * transaction's buffer credits.
+ */
+
+static int start_this_handle(journal_t *journal, handle_t *handle)
+{
+       transaction_t *transaction;
+       int needed;
+       int nblocks = handle->h_buffer_credits;
+       transaction_t *new_transaction = NULL;
+       int ret = 0;
+
+       if (nblocks > journal->j_max_transaction_buffers) {
+               printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
+                      current->comm, nblocks,
+                      journal->j_max_transaction_buffers);
+               ret = -ENOSPC;
+               goto out;
+       }
+
+alloc_transaction:
+       if (!journal->j_running_transaction) {
+               new_transaction = jbd_kmalloc(sizeof(*new_transaction),
+                                               GFP_NOFS);
+               if (!new_transaction) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               memset(new_transaction, 0, sizeof(*new_transaction));
+       }
+
+       jbd_debug(3, "New handle %p going live.\n", handle);
+
+repeat:
+
+       /*
+        * We need to hold j_state_lock until t_updates has been incremented,
+        * for proper journal barrier handling
+        */
+       spin_lock(&journal->j_state_lock);
+repeat_locked:
+       if (is_journal_aborted(journal) ||
+           (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
+               spin_unlock(&journal->j_state_lock);
+               ret = -EROFS;
+               goto out;
+       }
+
+       /* Wait on the journal's transaction barrier if necessary */
+       if (journal->j_barrier_count) {
+               spin_unlock(&journal->j_state_lock);
+               wait_event(journal->j_wait_transaction_locked,
+                               journal->j_barrier_count == 0);
+               goto repeat;
+       }
+
+       if (!journal->j_running_transaction) {
+               if (!new_transaction) {
+                       spin_unlock(&journal->j_state_lock);
+                       goto alloc_transaction;
+               }
+               jbd2_get_transaction(journal, new_transaction);
+               new_transaction = NULL;
+       }
+
+       transaction = journal->j_running_transaction;
+
+       /*
+        * If the current transaction is locked down for commit, wait for the
+        * lock to be released.
+        */
+       if (transaction->t_state == T_LOCKED) {
+               DEFINE_WAIT(wait);
+
+               prepare_to_wait(&journal->j_wait_transaction_locked,
+                                       &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock(&journal->j_state_lock);
+               schedule();
+               finish_wait(&journal->j_wait_transaction_locked, &wait);
+               goto repeat;
+       }
+
+       /*
+        * If there is not enough space left in the log to write all potential
+        * buffers requested by this operation, we need to stall pending a log
+        * checkpoint to free some more log space.
+        */
+       spin_lock(&transaction->t_handle_lock);
+       needed = transaction->t_outstanding_credits + nblocks;
+
+       if (needed > journal->j_max_transaction_buffers) {
+               /*
+                * If the current transaction is already too large, then start
+                * to commit it: we can then go back and attach this handle to
+                * a new transaction.
+                */
+               DEFINE_WAIT(wait);
+
+               jbd_debug(2, "Handle %p starting new commit...\n", handle);
+               spin_unlock(&transaction->t_handle_lock);
+               prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               __jbd2_log_start_commit(journal, transaction->t_tid);
+               spin_unlock(&journal->j_state_lock);
+               schedule();
+               finish_wait(&journal->j_wait_transaction_locked, &wait);
+               goto repeat;
+       }
+
+       /*
+        * The commit code assumes that it can get enough log space
+        * without forcing a checkpoint.  This is *critical* for
+        * correctness: a checkpoint of a buffer which is also
+        * associated with a committing transaction creates a deadlock,
+        * so commit simply cannot force through checkpoints.
+        *
+        * We must therefore ensure the necessary space in the journal
+        * *before* starting to dirty potentially checkpointed buffers
+        * in the new transaction.
+        *
+        * The worst part is, any transaction currently committing can
+        * reduce the free space arbitrarily.  Be careful to account for
+        * those buffers when checkpointing.
+        */
+
+       /*
+        * @@@ AKPM: This seems rather over-defensive.  We're giving commit
+        * a _lot_ of headroom: 1/4 of the journal plus the size of
+        * the committing transaction.  Really, we only need to give it
+        * committing_transaction->t_outstanding_credits plus "enough" for
+        * the log control blocks.
+        * Also, this test is inconsistent with the matching one in
+        * jbd2_journal_extend().
+        */
+       if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
+               jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
+               spin_unlock(&transaction->t_handle_lock);
+               __jbd2_log_wait_for_space(journal);
+               goto repeat_locked;
+       }
+
+       /* OK, account for the buffers that this operation expects to
+        * use and add the handle to the running transaction. */
+
+       handle->h_transaction = transaction;
+       transaction->t_outstanding_credits += nblocks;
+       transaction->t_updates++;
+       transaction->t_handle_count++;
+       jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
+                 handle, nblocks, transaction->t_outstanding_credits,
+                 __jbd2_log_space_left(journal));
+       spin_unlock(&transaction->t_handle_lock);
+       spin_unlock(&journal->j_state_lock);
+out:
+       if (unlikely(new_transaction))          /* It's usually NULL */
+               kfree(new_transaction);
+       return ret;
+}
+
+/* Allocate a new handle.  This should probably be in a slab... */
+static handle_t *new_handle(int nblocks)
+{
+       handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+       if (!handle)
+               return NULL;
+       memset(handle, 0, sizeof(*handle));
+       handle->h_buffer_credits = nblocks;
+       handle->h_ref = 1;
+
+       return handle;
+}
+
+/**
+ * handle_t *jbd2_journal_start() - Obtain a new handle.
+ * @journal: Journal to start transaction on.
+ * @nblocks: number of block buffers we might modify
+ *
+ * We make sure that the transaction can guarantee at least nblocks of
+ * modified buffers in the log.  We block until the log can guarantee
+ * that much space.
+ *
+ * This function is visible to journal users (like ext3fs), so is not
+ * called with the journal already locked.
+ *
+ * Return a pointer to a newly allocated handle, or an ERR_PTR() value
+ * on failure.
+ */
+handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
+{
+       handle_t *handle = journal_current_handle();
+       int err;
+
+       if (!journal)
+               return ERR_PTR(-EROFS);
+
+       if (handle) {
+               J_ASSERT(handle->h_transaction->t_journal == journal);
+               handle->h_ref++;
+               return handle;
+       }
+
+       handle = new_handle(nblocks);
+       if (!handle)
+               return ERR_PTR(-ENOMEM);
+
+       current->journal_info = handle;
+
+       err = start_this_handle(journal, handle);
+       if (err < 0) {
+               jbd_free_handle(handle);
+               current->journal_info = NULL;
+               handle = ERR_PTR(err);
+       }
+       return handle;
+}
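+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: the minimal handle lifecycle a journal client follows.  The
+ * single credit and the update itself are hypothetical.
+ */
+#if 0
+static int example_single_update(journal_t *journal)
+{
+       handle_t *handle = jbd2_journal_start(journal, 1);
+
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       /* ... modify at most one buffer under this handle ... */
+       return jbd2_journal_stop(handle);
+}
+#endif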
+
+/**
+ * int jbd2_journal_extend() - extend buffer credits.
+ * @handle:  handle to 'extend'
+ * @nblocks: nr blocks to try to extend by.
+ *
+ * Some transactions, such as large extends and truncates, can be done
+ * atomically all at once or in several stages.  The operation requests
+ * a credit for a number of buffer modifications in advance, but can
+ * extend its credit if it needs more.
+ *
+ * jbd2_journal_extend tries to give the running handle more buffer credits.
+ * It does not guarantee the allocation; this is best-effort only.
+ * The calling process MUST be able to deal cleanly with a failure to
+ * extend here.
+ *
+ * Returns 0 on success; a return code < 0 implies an error, and a
+ * return code > 0 implies normal transaction-full status.
+ */
+int jbd2_journal_extend(handle_t *handle, int nblocks)
+{
+       transaction_t *transaction = handle->h_transaction;
+       journal_t *journal = transaction->t_journal;
+       int result;
+       int wanted;
+
+       result = -EIO;
+       if (is_handle_aborted(handle))
+               goto out;
+
+       result = 1;
+
+       spin_lock(&journal->j_state_lock);
+
+       /* Don't extend a locked-down transaction! */
+       if (handle->h_transaction->t_state != T_RUNNING) {
+               jbd_debug(3, "denied handle %p %d blocks: "
+                         "transaction not running\n", handle, nblocks);
+               goto error_out;
+       }
+
+       spin_lock(&transaction->t_handle_lock);
+       wanted = transaction->t_outstanding_credits + nblocks;
+
+       if (wanted > journal->j_max_transaction_buffers) {
+               jbd_debug(3, "denied handle %p %d blocks: "
+                         "transaction too large\n", handle, nblocks);
+               goto unlock;
+       }
+
+       if (wanted > __jbd2_log_space_left(journal)) {
+               jbd_debug(3, "denied handle %p %d blocks: "
+                         "insufficient log space\n", handle, nblocks);
+               goto unlock;
+       }
+
+       handle->h_buffer_credits += nblocks;
+       transaction->t_outstanding_credits += nblocks;
+       result = 0;
+
+       jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
+unlock:
+       spin_unlock(&transaction->t_handle_lock);
+error_out:
+       spin_unlock(&journal->j_state_lock);
+out:
+       return result;
+}
+
+
+/**
+ * int jbd2_journal_restart() - restart a handle.
+ * @handle:  handle to restart
+ * @nblocks: nr credits requested
+ *
+ * Restart a handle for a multi-transaction filesystem
+ * operation.
+ *
+ * If the jbd2_journal_extend() call above fails to grant new buffer credits
+ * to a running handle, a call to jbd2_journal_restart will commit the
+ * handle's transaction so far and reattach the handle to a new
+ * transaction capable of guaranteeing the requested number of
+ * credits.
+ */
+
+int jbd2_journal_restart(handle_t *handle, int nblocks)
+{
+       transaction_t *transaction = handle->h_transaction;
+       journal_t *journal = transaction->t_journal;
+       int ret;
+
+       /* If we've had an abort of any type, don't even think about
+        * actually doing the restart! */
+       if (is_handle_aborted(handle))
+               return 0;
+
+       /*
+        * First unlink the handle from its current transaction, and start the
+        * commit on that.
+        */
+       J_ASSERT(transaction->t_updates > 0);
+       J_ASSERT(journal_current_handle() == handle);
+
+       spin_lock(&journal->j_state_lock);
+       spin_lock(&transaction->t_handle_lock);
+       transaction->t_outstanding_credits -= handle->h_buffer_credits;
+       transaction->t_updates--;
+
+       if (!transaction->t_updates)
+               wake_up(&journal->j_wait_updates);
+       spin_unlock(&transaction->t_handle_lock);
+
+       jbd_debug(2, "restarting handle %p\n", handle);
+       __jbd2_log_start_commit(journal, transaction->t_tid);
+       spin_unlock(&journal->j_state_lock);
+
+       handle->h_buffer_credits = nblocks;
+       ret = start_this_handle(journal, handle);
+       return ret;
+}
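+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: the usual extend-or-restart idiom for long-running
+ * operations.  A positive return from jbd2_journal_extend() means
+ * "transaction full", so we fall back to committing it and starting
+ * afresh.
+ */
+#if 0
+static int example_more_credits(handle_t *handle, int nblocks)
+{
+       int err = jbd2_journal_extend(handle, nblocks);
+
+       if (err > 0)
+               err = jbd2_journal_restart(handle, nblocks);
+       return err;     /* 0 on success, < 0 on error */
+}
+#endif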
+
+
+/**
+ * void jbd2_journal_lock_updates() - establish a transaction barrier.
+ * @journal:  Journal to establish a barrier on.
+ *
+ * This locks out any further updates from being started, and blocks
+ * until all existing updates have completed, returning only once the
+ * journal is in a quiescent state with no updates running.
+ *
+ * The journal lock should not be held on entry.
+ */
+void jbd2_journal_lock_updates(journal_t *journal)
+{
+       DEFINE_WAIT(wait);
+
+       spin_lock(&journal->j_state_lock);
+       ++journal->j_barrier_count;
+
+       /* Wait until there are no running updates */
+       while (1) {
+               transaction_t *transaction = journal->j_running_transaction;
+
+               if (!transaction)
+                       break;
+
+               spin_lock(&transaction->t_handle_lock);
+               if (!transaction->t_updates) {
+                       spin_unlock(&transaction->t_handle_lock);
+                       break;
+               }
+               prepare_to_wait(&journal->j_wait_updates, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               spin_unlock(&transaction->t_handle_lock);
+               spin_unlock(&journal->j_state_lock);
+               schedule();
+               finish_wait(&journal->j_wait_updates, &wait);
+               spin_lock(&journal->j_state_lock);
+       }
+       spin_unlock(&journal->j_state_lock);
+
+       /*
+        * We have now established a barrier against other normal updates, but
+        * we also need to barrier against other jbd2_journal_lock_updates() calls
+        * to make sure that we serialise special journal-locked operations
+        * too.
+        */
+       mutex_lock(&journal->j_barrier);
+}
+
+/**
+ * void jbd2_journal_unlock_updates() - release barrier
+ * @journal:  Journal to release the barrier on.
+ *
+ * Release a transaction barrier obtained with jbd2_journal_lock_updates().
+ *
+ * Should be called without the journal lock held.
+ */
+void jbd2_journal_unlock_updates(journal_t *journal)
+{
+       J_ASSERT(journal->j_barrier_count != 0);
+
+       mutex_unlock(&journal->j_barrier);
+       spin_lock(&journal->j_state_lock);
+       --journal->j_barrier_count;
+       spin_unlock(&journal->j_state_lock);
+       wake_up(&journal->j_wait_transaction_locked);
+}
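+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: quiescing the journal around an operation that must not race
+ * with running updates.  The operation itself is a hypothetical
+ * placeholder.
+ */
+#if 0
+static void example_quiesce(journal_t *journal)
+{
+       jbd2_journal_lock_updates(journal);     /* no new handles start */
+       /* ... perform the journal-locked operation here ... */
+       jbd2_journal_unlock_updates(journal);   /* drop barrier, wake waiters */
+}
+#endif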
+
+/*
+ * Report any unexpected dirty buffers which turn up.  Normally those
+ * indicate an error, but they can occur if the user is running (say)
+ * tune2fs to modify the live filesystem, so we need the option of
+ * continuing as gracefully as possible.
+ *
+ * The caller should hold the buffer lock and the buffer's
+ * journaling-state lock (jbd_lock_bh_state()): callers need those
+ * anyway in order to probe the buffer's journaling state safely.
+ */
+static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
+{
+       int jlist;
+
+       /* If this buffer is one which might reasonably be dirty
+        * --- ie. data, or not part of this journal --- then
+        * we're OK to leave it alone, but otherwise we need to
+        * move the dirty bit to the journal's own internal
+        * JBDDirty bit. */
+       jlist = jh->b_jlist;
+
+       if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
+           jlist == BJ_Shadow || jlist == BJ_Forget) {
+               struct buffer_head *bh = jh2bh(jh);
+
+               if (test_clear_buffer_dirty(bh))
+                       set_buffer_jbddirty(bh);
+       }
+}
+
+/*
+ * If the buffer is already part of the current transaction, then there
+ * is nothing we need to do.  If it is already part of a prior
+ * transaction which we are still committing to disk, then we need to
+ * make sure that we do not overwrite the old copy: we do copy-out to
+ * preserve the copy going to disk.  We also account the buffer against
+ * the handle's metadata buffer credits (unless the buffer is already
+ * part of the transaction, that is).
+ *
+ */
+static int
+do_get_write_access(handle_t *handle, struct journal_head *jh,
+                       int force_copy)
+{
+       struct buffer_head *bh;
+       transaction_t *transaction;
+       journal_t *journal;
+       int error;
+       char *frozen_buffer = NULL;
+       int need_copy = 0;
+
+       if (is_handle_aborted(handle))
+               return -EROFS;
+
+       transaction = handle->h_transaction;
+       journal = transaction->t_journal;
+
+       jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+
+       JBUFFER_TRACE(jh, "entry");
+repeat:
+       bh = jh2bh(jh);
+
+       /* @@@ Need to check for errors here at some point. */
+
+       lock_buffer(bh);
+       jbd_lock_bh_state(bh);
+
+       /* We now hold the buffer lock so it is safe to query the buffer
+        * state.  Is the buffer dirty?
+        *
+        * If so, there are two possibilities.  The buffer may be
+        * non-journaled, and undergoing a quite legitimate writeback.
+        * Otherwise, it is journaled, and we don't expect dirty buffers
+        * in that state (the buffers should be marked JBD_Dirty
+        * instead.)  So either the IO is being done under our own
+        * control and this is a bug, or it's a third party IO such as
+        * dump(8) (which may leave the buffer scheduled for read ---
+        * ie. locked but not dirty) or tune2fs (which may actually have
+        * the buffer dirtied, ugh.)  */
+
+       if (buffer_dirty(bh)) {
+               /*
+                * First question: is this buffer already part of the current
+                * transaction or the existing committing transaction?
+                */
+               if (jh->b_transaction) {
+                       J_ASSERT_JH(jh,
+                               jh->b_transaction == transaction ||
+                               jh->b_transaction ==
+                                       journal->j_committing_transaction);
+                       if (jh->b_next_transaction)
+                               J_ASSERT_JH(jh, jh->b_next_transaction ==
+                                                       transaction);
+               }
+               /*
+                * In any case we need to clean the dirty flag and we must
+                * do it under the buffer lock to be sure we don't race
+                * with running write-out.
+                */
+               JBUFFER_TRACE(jh, "Unexpected dirty buffer");
+               jbd_unexpected_dirty_buffer(jh);
+       }
+
+       unlock_buffer(bh);
+
+       error = -EROFS;
+       if (is_handle_aborted(handle)) {
+               jbd_unlock_bh_state(bh);
+               goto out;
+       }
+       error = 0;
+
+       /*
+        * The buffer is already part of this transaction if b_transaction or
+        * b_next_transaction points to it
+        */
+       if (jh->b_transaction == transaction ||
+           jh->b_next_transaction == transaction)
+               goto done;
+
+       /*
+        * If there is already a copy-out version of this buffer, then we don't
+        * need to make another one
+        */
+       if (jh->b_frozen_data) {
+               JBUFFER_TRACE(jh, "has frozen data");
+               J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+               jh->b_next_transaction = transaction;
+               goto done;
+       }
+
+       /* Is there data here we need to preserve? */
+
+       if (jh->b_transaction && jh->b_transaction != transaction) {
+               JBUFFER_TRACE(jh, "owned by older transaction");
+               J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+               J_ASSERT_JH(jh, jh->b_transaction ==
+                                       journal->j_committing_transaction);
+
+               /* There is one case we have to be very careful about.
+                * If the committing transaction is currently writing
+                * this buffer out to disk and has NOT made a copy-out,
+                * then we cannot modify the buffer contents at all
+                * right now.  The essence of copy-out is that it is the
+                * extra copy, not the primary copy, which gets
+                * journaled.  If the primary copy is already going to
+                * disk then we cannot do copy-out here. */
+
+               if (jh->b_jlist == BJ_Shadow) {
+                       DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
+                       wait_queue_head_t *wqh;
+
+                       wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
+
+                       JBUFFER_TRACE(jh, "on shadow: sleep");
+                       jbd_unlock_bh_state(bh);
+                       /* commit wakes up all shadow buffers after IO */
+                       for ( ; ; ) {
+                               prepare_to_wait(wqh, &wait.wait,
+                                               TASK_UNINTERRUPTIBLE);
+                               if (jh->b_jlist != BJ_Shadow)
+                                       break;
+                               schedule();
+                       }
+                       finish_wait(wqh, &wait.wait);
+                       goto repeat;
+               }
+
+               /* Only do the copy if the currently-owning transaction
+                * still needs it.  If it is on the Forget list, the
+                * committing transaction is past that stage.  The
+                * buffer had better remain locked during the kmalloc,
+                * but that should be true --- we hold the journal lock
+                * still and the buffer is already on the BUF_JOURNAL
+                * list so won't be flushed.
+                *
+                * Subtle point, though: if this is a get_undo_access,
+                * then we will be relying on the frozen_data to contain
+                * the new value of the committed_data record after the
+                * transaction, so we HAVE to force the frozen_data copy
+                * in that case. */
+
+               if (jh->b_jlist != BJ_Forget || force_copy) {
+                       JBUFFER_TRACE(jh, "generate frozen data");
+                       if (!frozen_buffer) {
+                               JBUFFER_TRACE(jh, "allocate memory for buffer");
+                               jbd_unlock_bh_state(bh);
+                               frozen_buffer =
+                                       jbd2_slab_alloc(jh2bh(jh)->b_size,
+                                                        GFP_NOFS);
+                               if (!frozen_buffer) {
+                                       printk(KERN_EMERG
+                                              "%s: OOM for frozen_buffer\n",
+                                              __FUNCTION__);
+                                       JBUFFER_TRACE(jh, "oom!");
+                                       error = -ENOMEM;
+                                       jbd_lock_bh_state(bh);
+                                       goto done;
+                               }
+                               goto repeat;
+                       }
+                       jh->b_frozen_data = frozen_buffer;
+                       frozen_buffer = NULL;
+                       need_copy = 1;
+               }
+               jh->b_next_transaction = transaction;
+       }
+
+
+       /*
+        * Finally, if the buffer is not journaled right now, we need to make
+        * sure it doesn't get written to disk before the caller actually
+        * commits the new data
+        */
+       if (!jh->b_transaction) {
+               JBUFFER_TRACE(jh, "no transaction");
+               J_ASSERT_JH(jh, !jh->b_next_transaction);
+               jh->b_transaction = transaction;
+               JBUFFER_TRACE(jh, "file as BJ_Reserved");
+               spin_lock(&journal->j_list_lock);
+               __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+               spin_unlock(&journal->j_list_lock);
+       }
+
+done:
+       if (need_copy) {
+               struct page *page;
+               int offset;
+               char *source;
+
+               J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
+                           "Possible IO failure.\n");
+               page = jh2bh(jh)->b_page;
+               offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+               source = kmap_atomic(page, KM_USER0);
+               memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
+               kunmap_atomic(source, KM_USER0);
+       }
+       jbd_unlock_bh_state(bh);
+
+       /*
+        * If we are about to journal a buffer, then any revoke pending on it is
+        * no longer valid
+        */
+       jbd2_journal_cancel_revoke(handle, jh);
+
+out:
+       if (unlikely(frozen_buffer))    /* It's usually NULL */
+               jbd2_slab_free(frozen_buffer, bh->b_size);
+
+       JBUFFER_TRACE(jh, "exit");
+       return error;
+}
+
+/**
+ * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
+ * @handle: transaction to add buffer modifications to
+ * @bh:     bh to be used for metadata writes
+ *
+ * Returns an error code or 0 on success.
+ *
+ * In full data journalling mode the buffer may be of type BJ_AsyncData,
+ * because we're write()ing a buffer which is also part of a shared mapping.
+ */
+
+int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
+{
+       struct journal_head *jh = jbd2_journal_add_journal_head(bh);
+       int rc;
+
+       /* We do not want to get caught playing with fields which the
+        * log thread also manipulates.  Make sure that the buffer
+        * completes any outstanding IO before proceeding. */
+       rc = do_get_write_access(handle, jh, 0);
+       jbd2_journal_put_journal_head(jh);
+       return rc;
+}
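+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: the canonical metadata update sequence.  Write access is
+ * declared *before* the buffer is modified, so any needed copy-out
+ * happens first; the modification shown is hypothetical.
+ */
+#if 0
+static int example_modify_metadata(handle_t *handle, struct buffer_head *bh)
+{
+       int err = jbd2_journal_get_write_access(handle, bh);
+
+       if (err)
+               return err;
+       /* ... modify bh->b_data here ... */
+       return jbd2_journal_dirty_metadata(handle, bh);
+}
+#endif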
+
+
+/*
+ * When the user wants to journal a newly created buffer_head
+ * (ie. getblk() returned a new buffer and we are going to populate it
+ * manually rather than reading off disk), then we need to keep the
+ * buffer_head locked until it has been completely filled with new
+ * data.  In this case, we should be able to make the assertion that
+ * the bh is not already part of an existing transaction.
+ *
+ * The buffer should already be locked by the caller by this point.
+ * There is no lock ranking violation: it was a newly created,
+ * unlocked buffer beforehand. */
+
+/**
+ * int jbd2_journal_get_create_access() - notify intent to use newly created bh
+ * @handle: transaction to add the new buffer to
+ * @bh: new buffer.
+ *
+ * Call this if you create a new bh.
+ */
+int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+{
+       transaction_t *transaction = handle->h_transaction;
+       journal_t *journal = transaction->t_journal;
+       struct journal_head *jh = jbd2_journal_add_journal_head(bh);
+       int err;
+
+       jbd_debug(5, "journal_head %p\n", jh);
+       err = -EROFS;
+       if (is_handle_aborted(handle))
+               goto out;
+       err = 0;
+
+       JBUFFER_TRACE(jh, "entry");
+       /*
+        * The buffer may already belong to this transaction due to pre-zeroing
+        * in the filesystem's new_block code.  It may also be on the previous,
+        * committing transaction's lists, but it HAS to be in Forget state in
+        * that case: the transaction must have deleted the buffer for it to be
+        * reused here.
+        */
+       jbd_lock_bh_state(bh);
+       spin_lock(&journal->j_list_lock);
+       J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
+               jh->b_transaction == NULL ||
+               (jh->b_transaction == journal->j_committing_transaction &&
+                         jh->b_jlist == BJ_Forget)));
+
+       J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+       J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
+
+       if (jh->b_transaction == NULL) {
+               jh->b_transaction = transaction;
+               JBUFFER_TRACE(jh, "file as BJ_Reserved");
+               __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+       } else if (jh->b_transaction == journal->j_committing_transaction) {
+               JBUFFER_TRACE(jh, "set next transaction");
+               jh->b_next_transaction = transaction;
+       }
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(bh);
+
+       /*
+        * akpm: I added this.  ext3_alloc_branch can pick up new indirect
+        * blocks which contain freed but then revoked metadata.  We need
+        * to cancel the revoke in case we end up freeing it yet again
+        * and then reallocating it as data - this would cause a second revoke,
+        * which hits an assertion error.
+        */
+       JBUFFER_TRACE(jh, "cancelling revoke");
+       jbd2_journal_cancel_revoke(handle, jh);
+       jbd2_journal_put_journal_head(jh);
+out:
+       return err;
+}
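+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: initialising a block that getblk() just created.  The buffer
+ * stays locked until it has been completely filled; zero-filling
+ * stands in for real contents.
+ */
+#if 0
+static int example_init_new_block(handle_t *handle, struct buffer_head *bh)
+{
+       int err;
+
+       lock_buffer(bh);
+       err = jbd2_journal_get_create_access(handle, bh);
+       if (!err) {
+               memset(bh->b_data, 0, bh->b_size);
+               set_buffer_uptodate(bh);
+       }
+       unlock_buffer(bh);
+       if (err)
+               return err;
+       return jbd2_journal_dirty_metadata(handle, bh);
+}
+#endif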
+
+/**
+ * int jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
+ *     non-rewindable consequences
+ * @handle: transaction
+ * @bh: buffer to undo
+ *
+ * Sometimes there is a need to distinguish between metadata which has
+ * been committed to disk and that which has not.  The ext3fs code uses
+ * this for freeing and allocating space, we have to make sure that we
+ * do not reuse freed space until the deallocation has been committed,
+ * since if we overwrote that space we would make the delete
+ * un-rewindable in case of a crash.
+ *
+ * To deal with that, jbd2_journal_get_undo_access requests write access to a
+ * buffer for parts of non-rewindable operations such as delete
+ * operations on the bitmaps.  The journaling code must keep a copy of
+ * the buffer's contents prior to the undo_access call until such time
+ * as we know that the buffer has definitely been committed to disk.
+ *
+ * We never need to know which transaction the committed data is part
+ * of, buffers touched here are guaranteed to be dirtied later and so
+ * will be committed to a new transaction in due course, at which point
+ * we can discard the old committed data pointer.
+ *
+ * Returns error number or 0 on success.
+ */
+int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
+{
+       int err;
+       struct journal_head *jh = jbd2_journal_add_journal_head(bh);
+       char *committed_data = NULL;
+
+       JBUFFER_TRACE(jh, "entry");
+
+       /*
+        * Do this first --- it can drop the journal lock, so we want to
+        * make sure that obtaining the committed_data is done
+        * atomically wrt. completion of any outstanding commits.
+        */
+       err = do_get_write_access(handle, jh, 1);
+       if (err)
+               goto out;
+
+repeat:
+       if (!jh->b_committed_data) {
+               committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+               if (!committed_data) {
+                       printk(KERN_EMERG "%s: No memory for committed data\n",
+                               __FUNCTION__);
+                       err = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       jbd_lock_bh_state(bh);
+       if (!jh->b_committed_data) {
+               /* Copy out the current buffer contents into the
+                * preserved, committed copy. */
+               JBUFFER_TRACE(jh, "generate b_committed data");
+               if (!committed_data) {
+                       jbd_unlock_bh_state(bh);
+                       goto repeat;
+               }
+
+               jh->b_committed_data = committed_data;
+               committed_data = NULL;
+               memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
+       }
+       jbd_unlock_bh_state(bh);
+out:
+       jbd2_journal_put_journal_head(jh);
+       if (unlikely(committed_data))
+               jbd2_slab_free(committed_data, bh->b_size);
+       return err;
+}
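+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: undo access is the variant used for non-rewindable updates
+ * such as clearing a bit in a block bitmap; the bitmap buffer and bit
+ * number are hypothetical.
+ */
+#if 0
+static int example_free_bitmap_bit(handle_t *handle,
+                       struct buffer_head *bitmap_bh, int bit)
+{
+       int err = jbd2_journal_get_undo_access(handle, bitmap_bh);
+
+       if (err)
+               return err;
+       clear_bit(bit, (unsigned long *)bitmap_bh->b_data);
+       return jbd2_journal_dirty_metadata(handle, bitmap_bh);
+}
+#endif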
+
+/**
+ * int jbd2_journal_dirty_data() - mark a buffer as containing dirty data which
+ *                             needs to be flushed before we can commit the
+ *                             current transaction.
+ * @handle: transaction
+ * @bh: bufferhead to mark
+ *
+ * The buffer is placed on the transaction's data list and is marked as
+ * belonging to the transaction.
+ *
+ * Returns error number or 0 on success.
+ *
+ * jbd2_journal_dirty_data() can be called via page_launder->ext3_writepage
+ * by kswapd.
+ */
+int jbd2_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+{
+       journal_t *journal = handle->h_transaction->t_journal;
+       int need_brelse = 0;
+       struct journal_head *jh;
+
+       if (is_handle_aborted(handle))
+               return 0;
+
+       jh = jbd2_journal_add_journal_head(bh);
+       JBUFFER_TRACE(jh, "entry");
+
+       /*
+        * The buffer could *already* be dirty.  Writeout can start
+        * at any time.
+        */
+       jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
+
+       /*
+        * What if the buffer is already part of a running transaction?
+        *
+        * There are two cases:
+        * 1) It is part of the current running transaction.  Refile it,
+        *    just in case we have allocated it as metadata, deallocated
+        *    it, then reallocated it as data.
+        * 2) It is part of the previous, still-committing transaction.
+        *    If all we want to do is to guarantee that the buffer will be
+        *    written to disk before this new transaction commits, then
+        *    being sure that the *previous* transaction has this same
+        *    property is sufficient for us!  Just leave it on its old
+        *    transaction.
+        *
+        * In case (2), the buffer must not already exist as metadata
+        * --- that would violate write ordering (a transaction is free
+        * to write its data at any point, even before the previous
+        * committing transaction has committed).  The caller must
+        * never, ever allow this to happen: there's nothing we can do
+        * about it in this layer.
+        */
+       jbd_lock_bh_state(bh);
+       spin_lock(&journal->j_list_lock);
+       if (jh->b_transaction) {
+               JBUFFER_TRACE(jh, "has transaction");
+               if (jh->b_transaction != handle->h_transaction) {
+                       JBUFFER_TRACE(jh, "belongs to older transaction");
+                       J_ASSERT_JH(jh, jh->b_transaction ==
+                                       journal->j_committing_transaction);
+
+                       /* @@@ IS THIS TRUE  ? */
+                       /*
+                        * Not any more.  Scenario: someone does a write()
+                        * in data=journal mode.  The buffer's transaction has
+                        * moved into commit.  Then someone does another
+                        * write() to the file.  We do the frozen data copyout
+                        * and set b_next_transaction to point to j_running_t.
+                        * And while we're in that state, someone does a
+                        * writepage() in an attempt to pageout the same area
+                        * of the file via a shared mapping.  At present that
+                        * calls jbd2_journal_dirty_data(), and we get right here.
+                        * It may be too late to journal the data.  Simply
+                        * falling through to the next test will suffice: the
+                        * data will be dirty and will be checkpointed.  The
+                        * ordering comments in the next comment block still
+                        * apply.
+                        */
+                       //J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
+
+                       /*
+                        * If we're journalling data, and this buffer was
+                        * subject to a write(), it could be metadata, forget
+                        * or shadow against the committing transaction.  Now,
+                        * someone has dirtied the same darn page via a mapping
+                        * and it is being writepage()'d.
+                        * We *could* just steal the page from commit, with some
+                        * fancy locking there.  Instead, we just skip it -
+                        * don't tie the page's buffers to the new transaction
+                        * at all.
+                        * Implication: if we crash before the writepage() data
+                        * is written into the filesystem, recovery will replay
+                        * the write() data.
+                        */
+                       if (jh->b_jlist != BJ_None &&
+                                       jh->b_jlist != BJ_SyncData &&
+                                       jh->b_jlist != BJ_Locked) {
+                               JBUFFER_TRACE(jh, "Not stealing");
+                               goto no_journal;
+                       }
+
+                       /*
+                        * This buffer may be undergoing writeout in commit.  We
+                        * can't return from here and let the caller dirty it
+                        * again because that can cause the write-out loop in
+                        * commit to never terminate.
+                        */
+                       if (buffer_dirty(bh)) {
+                               get_bh(bh);
+                               spin_unlock(&journal->j_list_lock);
+                               jbd_unlock_bh_state(bh);
+                               need_brelse = 1;
+                               sync_dirty_buffer(bh);
+                               jbd_lock_bh_state(bh);
+                               spin_lock(&journal->j_list_lock);
+                               /* The buffer may become locked again at any
+                                  time if it is redirtied */
+                       }
+
+                       /* journal_clean_data_list() may have got there first */
+                       if (jh->b_transaction != NULL) {
+                               JBUFFER_TRACE(jh, "unfile from commit");
+                               __jbd2_journal_temp_unlink_buffer(jh);
+                               /* It still points to the committing
+                                * transaction; move it to this one so
+                                * that the refile assert checks are
+                                * happy. */
+                               jh->b_transaction = handle->h_transaction;
+                       }
+                       /* The buffer will be refiled below */
+
+               }
+               /*
+                * Special case --- the buffer might actually have been
+                * allocated and then immediately deallocated in the previous,
+                * committing transaction, so might still be left on that
+                * transaction's metadata lists.
+                */
+               if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
+                       JBUFFER_TRACE(jh, "not on correct data list: unfile");
+                       J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
+                       __jbd2_journal_temp_unlink_buffer(jh);
+                       jh->b_transaction = handle->h_transaction;
+                       JBUFFER_TRACE(jh, "file as data");
+                       __jbd2_journal_file_buffer(jh, handle->h_transaction,
+                                               BJ_SyncData);
+               }
+       } else {
+               JBUFFER_TRACE(jh, "not on a transaction");
+               __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
+       }
+no_journal:
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(bh);
+       if (need_brelse) {
+               BUFFER_TRACE(bh, "brelse");
+               __brelse(bh);
+       }
+       JBUFFER_TRACE(jh, "exit");
+       jbd2_journal_put_journal_head(jh);
+       return 0;
+}
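+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: in ordered mode a filesystem ties freshly written file data
+ * to the running transaction so that it hits disk before the metadata
+ * that references it commits.
+ */
+#if 0
+static int example_order_data(handle_t *handle, struct buffer_head *bh)
+{
+       /* No buffer credits are consumed: the data goes to its home
+        * location, not to the log. */
+       return jbd2_journal_dirty_data(handle, bh);
+}
+#endif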
+
+/**
+ * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
+ * @handle: transaction to add buffer to.
+ * @bh: buffer to mark
+ *
+ * mark dirty metadata which needs to be journaled as part of the current
+ * transaction.
+ *
+ * The buffer is placed on the transaction's metadata list and is marked
+ * as belonging to the transaction.
+ *
+ * Returns error number or 0 on success.
+ *
+ * Special care needs to be taken if the buffer already belongs to the
+ * current committing transaction (in which case we should have frozen
+ * data present for that commit).  In that case, we don't relink the
+ * buffer: that only gets done when the old transaction finally
+ * completes its commit.
+ */
+int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+{
+       transaction_t *transaction = handle->h_transaction;
+       journal_t *journal = transaction->t_journal;
+       struct journal_head *jh = bh2jh(bh);
+
+       jbd_debug(5, "journal_head %p\n", jh);
+       JBUFFER_TRACE(jh, "entry");
+       if (is_handle_aborted(handle))
+               goto out;
+
+       jbd_lock_bh_state(bh);
+
+       if (jh->b_modified == 0) {
+               /*
+                * The buffer has been modified and is becoming part
+                * of the transaction.  This accounting needs to be
+                * done only once per transaction. -bzzz
+                */
+               jh->b_modified = 1;
+               J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+               handle->h_buffer_credits--;
+       }
+
+       /*
+        * fastpath, to avoid expensive locking.  If this buffer is already
+        * on the running transaction's metadata list there is nothing to do.
+        * Nobody can take it off again because there is a handle open.
+        * I _think_ we're OK here with SMP barriers - a mistaken decision will
+        * result in this test being false, so we go in and take the locks.
+        */
+       if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
+               JBUFFER_TRACE(jh, "fastpath");
+               J_ASSERT_JH(jh, jh->b_transaction ==
+                                       journal->j_running_transaction);
+               goto out_unlock_bh;
+       }
+
+       set_buffer_jbddirty(bh);
+
+       /*
+        * Metadata already on the current transaction list doesn't
+        * need to be filed.  Metadata on another transaction's list must
+        * be committing, and will be refiled once the commit completes:
+        * leave it alone for now.
+        */
+       if (jh->b_transaction != transaction) {
+               JBUFFER_TRACE(jh, "already on other transaction");
+               J_ASSERT_JH(jh, jh->b_transaction ==
+                                       journal->j_committing_transaction);
+               J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
+               /* And this case is illegal: we can't reuse another
+                * transaction's data buffer, ever. */
+               goto out_unlock_bh;
+       }
+
+       /* That test should have eliminated the following case: */
+       J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
+
+       JBUFFER_TRACE(jh, "file as BJ_Metadata");
+       spin_lock(&journal->j_list_lock);
+       __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
+       spin_unlock(&journal->j_list_lock);
+out_unlock_bh:
+       jbd_unlock_bh_state(bh);
+out:
+       JBUFFER_TRACE(jh, "exit");
+       return 0;
+}
+
+/*
+ * jbd2_journal_release_buffer: undo a get_write_access without any buffer
+ * updates, if the update decided in the end that it didn't need access.
+ *
+ */
+void
+jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+{
+       BUFFER_TRACE(bh, "entry");
+}
+
+/**
+ * int jbd2_journal_forget() - bforget() for potentially-journaled buffers.
+ * @handle: transaction handle
+ * @bh:     bh to 'forget'
+ *
+ * We can only do the bforget if there are no commits pending against the
+ * buffer.  If the buffer is dirty in the current running transaction we
+ * can safely unlink it.
+ *
+ * bh may not be a journalled buffer at all - it may be a non-JBD
+ * buffer which came off the hashtable.  Check for this.
+ *
+ * Decrements bh->b_count by one.
+ *
+ * Allow this call even if the handle has aborted --- it may be part of
+ * the caller's cleanup after an abort.
+ */
+int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
+{
+       transaction_t *transaction = handle->h_transaction;
+       journal_t *journal = transaction->t_journal;
+       struct journal_head *jh;
+       int drop_reserve = 0;
+       int err = 0;
+
+       BUFFER_TRACE(bh, "entry");
+
+       jbd_lock_bh_state(bh);
+       spin_lock(&journal->j_list_lock);
+
+       if (!buffer_jbd(bh))
+               goto not_jbd;
+       jh = bh2jh(bh);
+
+       /* Critical error: attempting to delete a bitmap buffer, maybe?
+        * Don't do any jbd operations, and return an error. */
+       if (!J_EXPECT_JH(jh, !jh->b_committed_data,
+                        "inconsistent data on disk")) {
+               err = -EIO;
+               goto not_jbd;
+       }
+
+       /*
+        * The buffer's going from the transaction, we must drop
+        * all references -bzzz
+        */
+       jh->b_modified = 0;
+
+       if (jh->b_transaction == handle->h_transaction) {
+               J_ASSERT_JH(jh, !jh->b_frozen_data);
+
+               /* If we are forgetting a buffer which is already part
+                * of this transaction, then we can just drop it from
+                * the transaction immediately. */
+               clear_buffer_dirty(bh);
+               clear_buffer_jbddirty(bh);
+
+               JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
+
+               drop_reserve = 1;
+
+               /*
+                * We are no longer going to journal this buffer.
+                * However, the commit of this transaction is still
+                * important to the buffer: the delete that we are now
+                * processing might obsolete an old log entry, so by
+                * committing, we can satisfy the buffer's checkpoint.
+                *
+                * So, if we have a checkpoint on the buffer, we should
+                * now refile the buffer on our BJ_Forget list so that
+                * we know to remove the checkpoint after we commit.
+                */
+
+               if (jh->b_cp_transaction) {
+                       __jbd2_journal_temp_unlink_buffer(jh);
+                       __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
+               } else {
+                       __jbd2_journal_unfile_buffer(jh);
+                       jbd2_journal_remove_journal_head(bh);
+                       __brelse(bh);
+                       if (!buffer_jbd(bh)) {
+                               spin_unlock(&journal->j_list_lock);
+                               jbd_unlock_bh_state(bh);
+                               __bforget(bh);
+                               goto drop;
+                       }
+               }
+       } else if (jh->b_transaction) {
+               J_ASSERT_JH(jh, (jh->b_transaction ==
+                                journal->j_committing_transaction));
+               /* However, if the buffer is still owned by a prior
+                * (committing) transaction, we can't drop it yet... */
+               JBUFFER_TRACE(jh, "belongs to older transaction");
+               /* ... but we CAN drop it from the new transaction if we
+                * have also modified it since the original commit. */
+
+               if (jh->b_next_transaction) {
+                       J_ASSERT(jh->b_next_transaction == transaction);
+                       jh->b_next_transaction = NULL;
+                       drop_reserve = 1;
+               }
+       }
+
+not_jbd:
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(bh);
+       __brelse(bh);
+drop:
+       if (drop_reserve) {
+               /* no need to reserve log space for this block -bzzz */
+               handle->h_buffer_credits++;
+       }
+       return err;
+}
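+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: when a filesystem frees a metadata block it has modified in
+ * this transaction, it forgets the buffer instead of journaling its
+ * now-meaningless contents.
+ */
+#if 0
+static int example_free_metadata(handle_t *handle, struct buffer_head *bh)
+{
+       get_bh(bh);     /* jbd2_journal_forget() consumes one reference */
+       return jbd2_journal_forget(handle, bh);
+}
+#endif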
+
+/**
+ * int jbd2_journal_stop() - complete a transaction
+ * @handle: transaction to complete.
+ *
+ * All done for a particular handle.
+ *
+ * There is not much action needed here.  We just return any remaining
+ * buffer credits to the transaction and remove the handle.  The only
+ * complication is that we need to start a commit operation if the
+ * filesystem is marked for synchronous update.
+ *
+ * jbd2_journal_stop itself will not usually return an error, but it may
+ * do so in unusual circumstances.  In particular, expect it to
+ * return -EIO if a jbd2_journal_abort has been executed since the
+ * transaction began.
+ */
+int jbd2_journal_stop(handle_t *handle)
+{
+       transaction_t *transaction = handle->h_transaction;
+       journal_t *journal = transaction->t_journal;
+       int old_handle_count, err;
+       pid_t pid;
+
+       J_ASSERT(transaction->t_updates > 0);
+       J_ASSERT(journal_current_handle() == handle);
+
+       if (is_handle_aborted(handle))
+               err = -EIO;
+       else
+               err = 0;
+
+       if (--handle->h_ref > 0) {
+               jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+                         handle->h_ref);
+               return err;
+       }
+
+       jbd_debug(4, "Handle %p going down\n", handle);
+
+       /*
+        * Implement synchronous transaction batching.  If the handle
+        * was synchronous, don't force a commit immediately.  Let's
+        * yield and let another thread piggyback onto this transaction.
+        * Keep doing that while new threads continue to arrive.
+        * It doesn't cost much - we're about to run a commit and sleep
+        * on IO anyway.  Speeds up many-threaded, many-dir operations
+        * by 30x or more...
+        *
+        * But don't do this if this process was the most recent one to
+        * perform a synchronous write.  We do this to detect the case where a
+        * single process is doing a stream of sync writes.  No point in waiting
+        * for joiners in that case.
+        */
+       pid = current->pid;
+       if (handle->h_sync && journal->j_last_sync_writer != pid) {
+               journal->j_last_sync_writer = pid;
+               do {
+                       old_handle_count = transaction->t_handle_count;
+                       schedule_timeout_uninterruptible(1);
+               } while (old_handle_count != transaction->t_handle_count);
+       }
+
+       current->journal_info = NULL;
+       spin_lock(&journal->j_state_lock);
+       spin_lock(&transaction->t_handle_lock);
+       transaction->t_outstanding_credits -= handle->h_buffer_credits;
+       transaction->t_updates--;
+       if (!transaction->t_updates) {
+               wake_up(&journal->j_wait_updates);
+               if (journal->j_barrier_count)
+                       wake_up(&journal->j_wait_transaction_locked);
+       }
+
+       /*
+        * If the handle is marked SYNC, we need to set another commit
+        * going!  We also want to force a commit if the current
+        * transaction is occupying too much of the log, or if the
+        * transaction is too old now.
+        */
+       if (handle->h_sync ||
+                       transaction->t_outstanding_credits >
+                               journal->j_max_transaction_buffers ||
+                       time_after_eq(jiffies, transaction->t_expires)) {
+               /* Do this even for aborted journals: an abort still
+                * completes the commit thread, it just doesn't write
+                * anything to disk. */
+               tid_t tid = transaction->t_tid;
+
+               spin_unlock(&transaction->t_handle_lock);
+               jbd_debug(2, "transaction too old, requesting commit for "
+                                       "handle %p\n", handle);
+               /* This is non-blocking */
+               __jbd2_log_start_commit(journal, tid);
+               spin_unlock(&journal->j_state_lock);
+
+               /*
+                * Special case: JBD2_SYNC synchronous updates require us
+                * to wait for the commit to complete.
+                */
+               if (handle->h_sync && !(current->flags & PF_MEMALLOC))
+                       err = jbd2_log_wait_commit(journal, tid);
+       } else {
+               spin_unlock(&transaction->t_handle_lock);
+               spin_unlock(&journal->j_state_lock);
+       }
+
+       jbd_free_handle(handle);
+       return err;
+}
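+
+/*
+ * Illustrative sketch, not part of this patch and guarded out of the
+ * build: a synchronous update (an fsync-style path, say) sets h_sync
+ * so that jbd2_journal_stop() commits, and waits for, the transaction.
+ */
+#if 0
+static int example_sync_update(journal_t *journal)
+{
+       handle_t *handle = jbd2_journal_start(journal, 1);
+
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+       /* ... updates ... */
+       handle->h_sync = 1;     /* commit and wait at stop time */
+       return jbd2_journal_stop(handle);
+}
+#endif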
+
+/**
+ * int jbd2_journal_force_commit() - force any uncommitted transactions
+ * @journal: journal to force
+ *
+ * For synchronous operations: force any uncommitted transactions
+ * to disk.  May seem kludgy, but it reuses all the handle batching
+ * code in a very simple manner.
+ */
+int jbd2_journal_force_commit(journal_t *journal)
+{
+       handle_t *handle;
+       int ret;
+
+       handle = jbd2_journal_start(journal, 1);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+       } else {
+               handle->h_sync = 1;
+               ret = jbd2_journal_stop(handle);
+       }
+       return ret;
+}
+
+/*
+ *
+ * List management code snippets: various functions for manipulating the
+ * transaction buffer lists.
+ *
+ */
+
+/*
+ * Append a buffer to a transaction list, given the transaction's list head
+ * pointer.
+ *
+ * j_list_lock is held.
+ *
+ * jbd_lock_bh_state(jh2bh(jh)) is held.
+ */
+
+static inline void
+__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
+{
+       if (!*list) {
+               jh->b_tnext = jh->b_tprev = jh;
+               *list = jh;
+       } else {
+               /* Insert at the tail of the list to preserve order */
+               struct journal_head *first = *list, *last = first->b_tprev;
+               jh->b_tprev = last;
+               jh->b_tnext = first;
+               last->b_tnext = first->b_tprev = jh;
+       }
+}
+
+/*
+ * Remove a buffer from a transaction list, given the transaction's list
+ * head pointer.
+ *
+ * Called with j_list_lock held, and the journal may not be locked.
+ *
+ * jbd_lock_bh_state(jh2bh(jh)) is held.
+ */
+
+static inline void
+__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
+{
+       if (*list == jh) {
+               *list = jh->b_tnext;
+               if (*list == jh)
+                       *list = NULL;
+       }
+       jh->b_tprev->b_tnext = jh->b_tnext;
+       jh->b_tnext->b_tprev = jh->b_tprev;
+}
+
+/*
+ * Remove a buffer from the appropriate transaction list.
+ *
+ * Note that this function can *change* the value of
+ * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
+ * t_iobuf_list, t_shadow_list, t_log_list, t_reserved_list or
+ * t_locked_list.  If the caller is holding onto a copy of one of
+ * these pointers, it could go bad.
+ * Generally the caller needs to re-read the pointer from the transaction_t.
+ *
+ * Called under j_list_lock.  The journal may not be locked.
+ */
+void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
+{
+       struct journal_head **list = NULL;
+       transaction_t *transaction;
+       struct buffer_head *bh = jh2bh(jh);
+
+       J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+       transaction = jh->b_transaction;
+       if (transaction)
+               assert_spin_locked(&transaction->t_journal->j_list_lock);
+
+       J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
+       if (jh->b_jlist != BJ_None)
+               J_ASSERT_JH(jh, transaction != NULL);
+
+       switch (jh->b_jlist) {
+       case BJ_None:
+               return;
+       case BJ_SyncData:
+               list = &transaction->t_sync_datalist;
+               break;
+       case BJ_Metadata:
+               transaction->t_nr_buffers--;
+               J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
+               list = &transaction->t_buffers;
+               break;
+       case BJ_Forget:
+               list = &transaction->t_forget;
+               break;
+       case BJ_IO:
+               list = &transaction->t_iobuf_list;
+               break;
+       case BJ_Shadow:
+               list = &transaction->t_shadow_list;
+               break;
+       case BJ_LogCtl:
+               list = &transaction->t_log_list;
+               break;
+       case BJ_Reserved:
+               list = &transaction->t_reserved_list;
+               break;
+       case BJ_Locked:
+               list = &transaction->t_locked_list;
+               break;
+       }
+
+       __blist_del_buffer(list, jh);
+       jh->b_jlist = BJ_None;
+       if (test_clear_buffer_jbddirty(bh))
+               mark_buffer_dirty(bh);  /* Expose it to the VM */
+}
+
+void __jbd2_journal_unfile_buffer(struct journal_head *jh)
+{
+       __jbd2_journal_temp_unlink_buffer(jh);
+       jh->b_transaction = NULL;
+}
+
+void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
+{
+       jbd_lock_bh_state(jh2bh(jh));
+       spin_lock(&journal->j_list_lock);
+       __jbd2_journal_unfile_buffer(jh);
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(jh2bh(jh));
+}
+
+/*
+ * Called from jbd2_journal_try_to_free_buffers().
+ *
+ * Called under jbd_lock_bh_state(bh)
+ */
+static void
+__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
+{
+       struct journal_head *jh;
+
+       jh = bh2jh(bh);
+
+       if (buffer_locked(bh) || buffer_dirty(bh))
+               goto out;
+
+       if (jh->b_next_transaction != NULL)
+               goto out;
+
+       spin_lock(&journal->j_list_lock);
+       if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
+               if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
+                       /* A written-back ordered data buffer */
+                       JBUFFER_TRACE(jh, "release data");
+                       __jbd2_journal_unfile_buffer(jh);
+                       jbd2_journal_remove_journal_head(bh);
+                       __brelse(bh);
+               }
+       } else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
+               /* written-back checkpointed metadata buffer */
+               if (jh->b_jlist == BJ_None) {
+                       JBUFFER_TRACE(jh, "remove from checkpoint list");
+                       __jbd2_journal_remove_checkpoint(jh);
+                       jbd2_journal_remove_journal_head(bh);
+                       __brelse(bh);
+               }
+       }
+       spin_unlock(&journal->j_list_lock);
+out:
+       return;
+}
+
+/**
+ * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
+ * @journal: journal for operation
+ * @page: to try and free
+ * @unused_gfp_mask: unused
+ *
+ * For all the buffers on this page,
+ * if they are fully written out ordered data, move them onto BUF_CLEAN
+ * so try_to_free_buffers() can reap them.
+ *
+ * This function returns non-zero if we wish try_to_free_buffers()
+ * to be called. We do this if the page is releasable by try_to_free_buffers().
+ * We also do it if the page has locked or dirty buffers and the caller wants
+ * us to perform sync or async writeout.
+ *
+ * This complicates JBD locking somewhat.  We aren't protected by the
+ * BKL here.  We wish to remove the buffer from its committing or
+ * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
+ *
+ * This may *change* the value of transaction_t->t_datalist, so anyone
+ * who looks at t_datalist needs to lock against this function.
+ *
+ * Even worse, someone may be doing a jbd2_journal_dirty_data on this
+ * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
+ * will come out of the lock with the buffer dirty, which makes it
+ * ineligible for release here.
+ *
+ * Who else is affected by this?  hmm...  Really the only contender
+ * is do_get_write_access() - it could be looking at the buffer while
+ * journal_try_to_free_buffer() is changing its state.  But that
+ * cannot happen because we never reallocate freed data as metadata
+ * while the data is part of a transaction.  Yes?
+ */
+int jbd2_journal_try_to_free_buffers(journal_t *journal,
+                               struct page *page, gfp_t unused_gfp_mask)
+{
+       struct buffer_head *head;
+       struct buffer_head *bh;
+       int ret = 0;
+
+       J_ASSERT(PageLocked(page));
+
+       head = page_buffers(page);
+       bh = head;
+       do {
+               struct journal_head *jh;
+
+               /*
+                * We take our own ref against the journal_head here to avoid
+                * having to add tons of locking around each instance of
+                * jbd2_journal_remove_journal_head() and jbd2_journal_put_journal_head().
+                */
+               jh = jbd2_journal_grab_journal_head(bh);
+               if (!jh)
+                       continue;
+
+               jbd_lock_bh_state(bh);
+               __journal_try_to_free_buffer(journal, bh);
+               jbd2_journal_put_journal_head(jh);
+               jbd_unlock_bh_state(bh);
+               if (buffer_jbd(bh))
+                       goto busy;
+       } while ((bh = bh->b_this_page) != head);
+       ret = try_to_free_buffers(page);
+busy:
+       return ret;
+}
+
+/*
+ * This buffer is no longer needed.  If it is on an older transaction's
+ * checkpoint list we need to record it on this transaction's forget list
+ * to pin this buffer (and hence its checkpointing transaction) down until
+ * this transaction commits.  If the buffer isn't on a checkpoint list, we
+ * release it.
+ * Returns non-zero if JBD no longer has an interest in the buffer.
+ *
+ * Called under j_list_lock.
+ *
+ * Called under jbd_lock_bh_state(bh).
+ */
+static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
+{
+       int may_free = 1;
+       struct buffer_head *bh = jh2bh(jh);
+
+       __jbd2_journal_unfile_buffer(jh);
+
+       if (jh->b_cp_transaction) {
+               JBUFFER_TRACE(jh, "on running+cp transaction");
+               __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
+               clear_buffer_jbddirty(bh);
+               may_free = 0;
+       } else {
+               JBUFFER_TRACE(jh, "on running transaction");
+               jbd2_journal_remove_journal_head(bh);
+               __brelse(bh);
+       }
+       return may_free;
+}
+
+/*
+ * jbd2_journal_invalidatepage
+ *
+ * This code is tricky.  It has a number of cases to deal with.
+ *
+ * There are two invariants which this code relies on:
+ *
+ * i_size must be updated on disk before we start calling invalidatepage on the
+ * data.
+ *
+ *  This is done in ext3 by defining an ext3_setattr method which
+ *  updates i_size before truncate gets going.  By maintaining this
+ *  invariant, we can be sure that it is safe to throw away any buffers
+ *  attached to the current transaction: once the transaction commits,
+ *  we know that the data will not be needed.
+ *
+ *  Note however that we can *not* throw away data belonging to the
+ *  previous, committing transaction!
+ *
+ * Any disk blocks which *are* part of the previous, committing
+ * transaction (and which therefore cannot be discarded immediately) are
+ * not going to be reused in the new running transaction.
+ *
+ *  The bitmap committed_data images guarantee this: any block which is
+ *  allocated in one transaction and removed in the next will be marked
+ *  as in-use in the committed_data bitmap, so cannot be reused until
+ *  the next transaction to delete the block commits.  This means that
+ *  leaving committing buffers dirty is quite safe: the disk blocks
+ *  cannot be reallocated to a different file and so buffer aliasing is
+ *  not possible.
+ *
+ *
+ * The above applies mainly to ordered data mode.  In writeback mode we
+ * don't make guarantees about the order in which data hits disk --- in
+ * particular we don't guarantee that new dirty data is flushed before
+ * transaction commit --- so it is always safe just to discard data
+ * immediately in that mode.  --sct
+ */
+
+/*
+ * The journal_unmap_buffer helper function returns zero if the buffer
+ * concerned remains pinned as an anonymous buffer belonging to an older
+ * transaction.
+ *
+ * We're outside-transaction here.  Either or both of j_running_transaction
+ * and j_committing_transaction may be NULL.
+ */
+static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+{
+       transaction_t *transaction;
+       struct journal_head *jh;
+       int may_free = 1;
+       int ret;
+
+       BUFFER_TRACE(bh, "entry");
+
+       /*
+        * It is safe to proceed here without the j_list_lock because the
+        * buffers cannot be stolen by try_to_free_buffers as long as we are
+        * holding the page lock. --sct
+        */
+
+       if (!buffer_jbd(bh))
+               goto zap_buffer_unlocked;
+
+       spin_lock(&journal->j_state_lock);
+       jbd_lock_bh_state(bh);
+       spin_lock(&journal->j_list_lock);
+
+       jh = jbd2_journal_grab_journal_head(bh);
+       if (!jh)
+               goto zap_buffer_no_jh;
+
+       transaction = jh->b_transaction;
+       if (transaction == NULL) {
+               /* First case: not on any transaction.  If it
+                * has no checkpoint link, then we can zap it:
+                * it's a writeback-mode buffer so we don't care
+                * if it hits disk safely. */
+               if (!jh->b_cp_transaction) {
+                       JBUFFER_TRACE(jh, "not on any transaction: zap");
+                       goto zap_buffer;
+               }
+
+               if (!buffer_dirty(bh)) {
+                       /* bdflush has written it.  We can drop it now */
+                       goto zap_buffer;
+               }
+
+               /* OK, it must be in the journal but still not
+                * written fully to disk: it's metadata or
+                * journaled data... */
+
+               if (journal->j_running_transaction) {
+                       /* ... and once the current transaction has
+                        * committed, the buffer won't be needed any
+                        * longer. */
+                       JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+                       ret = __dispose_buffer(jh,
+                                       journal->j_running_transaction);
+                       jbd2_journal_put_journal_head(jh);
+                       spin_unlock(&journal->j_list_lock);
+                       jbd_unlock_bh_state(bh);
+                       spin_unlock(&journal->j_state_lock);
+                       return ret;
+               } else {
+                       /* There is no currently-running transaction. So the
+                        * orphan record which we wrote for this file must have
+                        * passed into commit.  We must attach this buffer to
+                        * the committing transaction, if it exists. */
+                       if (journal->j_committing_transaction) {
+                               JBUFFER_TRACE(jh, "give to committing trans");
+                               ret = __dispose_buffer(jh,
+                                       journal->j_committing_transaction);
+                               jbd2_journal_put_journal_head(jh);
+                               spin_unlock(&journal->j_list_lock);
+                               jbd_unlock_bh_state(bh);
+                               spin_unlock(&journal->j_state_lock);
+                               return ret;
+                       } else {
+                               /* The orphan record's transaction has
+                                * committed.  We can cleanse this buffer */
+                               clear_buffer_jbddirty(bh);
+                               goto zap_buffer;
+                       }
+               }
+       } else if (transaction == journal->j_committing_transaction) {
+               if (jh->b_jlist == BJ_Locked) {
+                       /*
+                        * The buffer is on the committing transaction's locked
+                        * list.  We have the buffer locked, so I/O has
+                        * completed.  So we can nail the buffer now.
+                        */
+                       may_free = __dispose_buffer(jh, transaction);
+                       goto zap_buffer;
+               }
+               /*
+                * If it is committing, we simply cannot touch it.  We
+                * can remove its next_transaction pointer from the
+                * running transaction if that is set, but nothing
+                * else. */
+               JBUFFER_TRACE(jh, "on committing transaction");
+               set_buffer_freed(bh);
+               if (jh->b_next_transaction) {
+                       J_ASSERT(jh->b_next_transaction ==
+                                       journal->j_running_transaction);
+                       jh->b_next_transaction = NULL;
+               }
+               jbd2_journal_put_journal_head(jh);
+               spin_unlock(&journal->j_list_lock);
+               jbd_unlock_bh_state(bh);
+               spin_unlock(&journal->j_state_lock);
+               return 0;
+       } else {
+               /* Good, the buffer belongs to the running transaction.
+                * We are writing our own transaction's data, not any
+                * previous one's, so it is safe to throw it away
+                * (remember that we expect the filesystem to have set
+                * i_size already for this truncate so recovery will not
+                * expose the disk blocks we are discarding here.) */
+               J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
+               may_free = __dispose_buffer(jh, transaction);
+       }
+
+zap_buffer:
+       jbd2_journal_put_journal_head(jh);
+zap_buffer_no_jh:
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(bh);
+       spin_unlock(&journal->j_state_lock);
+zap_buffer_unlocked:
+       clear_buffer_dirty(bh);
+       J_ASSERT_BH(bh, !buffer_jbddirty(bh));
+       clear_buffer_mapped(bh);
+       clear_buffer_req(bh);
+       clear_buffer_new(bh);
+       bh->b_bdev = NULL;
+       return may_free;
+}
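
journal_unmap_buffer() above is essentially a case analysis on who owns the buffer and whether a checkpoint link pins it. As a reading aid, a hedged userspace model of just the decision logic follows; the enum names are invented, locking and the actual list surgery are omitted, and the BJ_Locked shortcut is folded into the dispose() helper:

#include <stdio.h>
#include <stdbool.h>

enum owner  { OWNER_NONE, OWNER_COMMITTING, OWNER_RUNNING };
enum action {
        ZAP,            /* discard the buffer outright */
        TO_FORGET,      /* pin on a transaction's BJ_Forget list */
        KEEP            /* committing owns it: mark freed, touch nothing */
};

/* Models __dispose_buffer(): pin if checkpointed, otherwise free. */
static enum action dispose(bool has_cp_link)
{
        return has_cp_link ? TO_FORGET : ZAP;
}

static enum action unmap_decision(enum owner owner, bool has_cp_link,
                                  bool dirty, bool running_exists,
                                  bool committing_exists, bool on_locked_list)
{
        if (owner == OWNER_NONE) {
                if (!has_cp_link || !dirty)
                        return ZAP;     /* nothing pins it / already written */
                if (running_exists || committing_exists)
                        return dispose(has_cp_link);
                return ZAP;             /* orphan's transaction has committed */
        }
        if (owner == OWNER_COMMITTING)  /* I/O done if on the locked list */
                return on_locked_list ? dispose(has_cp_link) : KEEP;
        return dispose(has_cp_link);    /* running transaction owns it */
}

int main(void)
{
        /* Dirty, checkpointed, unowned buffer; a running transaction exists. */
        printf("%d\n",
               unmap_decision(OWNER_NONE, true, true, true, false, false));
        return 0;
}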
+
+/**
+ * void jbd2_journal_invalidatepage()
+ * @journal: journal to use for flush...
+ * @page:    page to flush
+ * @offset:  length of page to invalidate.
+ *
+ * Reap page buffers containing data after offset in page.
+ *
+ */
+void jbd2_journal_invalidatepage(journal_t *journal,
+                     struct page *page,
+                     unsigned long offset)
+{
+       struct buffer_head *head, *bh, *next;
+       unsigned int curr_off = 0;
+       int may_free = 1;
+
+       BUG_ON(!PageLocked(page));
+       if (!page_has_buffers(page))
+               return;
+
+       /* We will potentially be playing with lists other than just the
+        * data lists (especially for journaled data mode), so be
+        * cautious in our locking. */
+
+       head = bh = page_buffers(page);
+       do {
+               unsigned int next_off = curr_off + bh->b_size;
+               next = bh->b_this_page;
+
+               if (offset <= curr_off) {
+                       /* This block is wholly outside the truncation point */
+                       lock_buffer(bh);
+                       may_free &= journal_unmap_buffer(journal, bh);
+                       unlock_buffer(bh);
+               }
+               curr_off = next_off;
+               bh = next;
+
+       } while (bh != head);
+
+       if (!offset) {
+               if (may_free && try_to_free_buffers(page))
+                       J_ASSERT(!page_has_buffers(page));
+       }
+}
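
The curr_off/next_off walk unmaps only buffers that begin at or after the truncation offset; a buffer straddling the offset is kept, since its leading bytes are still valid. A standalone check of that arithmetic, with illustrative page and buffer sizes:

#include <stdio.h>

int main(void)
{
        unsigned int page_size = 4096, bsize = 1024, offset = 2500;
        unsigned int curr_off = 0;

        while (curr_off < page_size) {
                unsigned int next_off = curr_off + bsize;

                /* Same test as jbd2_journal_invalidatepage(). */
                printf("buffer %4u..%4u: %s\n", curr_off, next_off - 1,
                       offset <= curr_off ? "unmap" : "keep");
                curr_off = next_off;
        }
        return 0;
}

With offset 2500, only the buffer starting at 3072 is unmapped; the buffer at 2048, which contains the offset, stays.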
+
+/*
+ * File a buffer on the given transaction list.
+ */
+void __jbd2_journal_file_buffer(struct journal_head *jh,
+                       transaction_t *transaction, int jlist)
+{
+       struct journal_head **list = NULL;
+       int was_dirty = 0;
+       struct buffer_head *bh = jh2bh(jh);
+
+       J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+       assert_spin_locked(&transaction->t_journal->j_list_lock);
+
+       J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
+       J_ASSERT_JH(jh, jh->b_transaction == transaction ||
+                               jh->b_transaction == NULL);
+
+       if (jh->b_transaction && jh->b_jlist == jlist)
+               return;
+
+       /* The following list of buffer states needs to be consistent
+        * with __jbd_unexpected_dirty_buffer()'s handling of dirty
+        * state. */
+
+       if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
+           jlist == BJ_Shadow || jlist == BJ_Forget) {
+               if (test_clear_buffer_dirty(bh) ||
+                   test_clear_buffer_jbddirty(bh))
+                       was_dirty = 1;
+       }
+
+       if (jh->b_transaction)
+               __jbd2_journal_temp_unlink_buffer(jh);
+       jh->b_transaction = transaction;
+
+       switch (jlist) {
+       case BJ_None:
+               J_ASSERT_JH(jh, !jh->b_committed_data);
+               J_ASSERT_JH(jh, !jh->b_frozen_data);
+               return;
+       case BJ_SyncData:
+               list = &transaction->t_sync_datalist;
+               break;
+       case BJ_Metadata:
+               transaction->t_nr_buffers++;
+               list = &transaction->t_buffers;
+               break;
+       case BJ_Forget:
+               list = &transaction->t_forget;
+               break;
+       case BJ_IO:
+               list = &transaction->t_iobuf_list;
+               break;
+       case BJ_Shadow:
+               list = &transaction->t_shadow_list;
+               break;
+       case BJ_LogCtl:
+               list = &transaction->t_log_list;
+               break;
+       case BJ_Reserved:
+               list = &transaction->t_reserved_list;
+               break;
+       case BJ_Locked:
+               list = &transaction->t_locked_list;
+               break;
+       }
+
+       __blist_add_buffer(list, jh);
+       jh->b_jlist = jlist;
+
+       if (was_dirty)
+               set_buffer_jbddirty(bh);
+}
+
+void jbd2_journal_file_buffer(struct journal_head *jh,
+                               transaction_t *transaction, int jlist)
+{
+       jbd_lock_bh_state(jh2bh(jh));
+       spin_lock(&transaction->t_journal->j_list_lock);
+       __jbd2_journal_file_buffer(jh, transaction, jlist);
+       spin_unlock(&transaction->t_journal->j_list_lock);
+       jbd_unlock_bh_state(jh2bh(jh));
+}
+
+/*
+ * Remove a buffer from its current buffer list in preparation for
+ * dropping it from its current transaction entirely.  If the buffer has
+ * already started to be used by a subsequent transaction, refile the
+ * buffer on that transaction's metadata list.
+ *
+ * Called under journal->j_list_lock
+ *
+ * Called under jbd_lock_bh_state(jh2bh(jh))
+ */
+void __jbd2_journal_refile_buffer(struct journal_head *jh)
+{
+       int was_dirty;
+       struct buffer_head *bh = jh2bh(jh);
+
+       J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
+       if (jh->b_transaction)
+               assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
+
+       /* If the buffer is now unused, just drop it. */
+       if (jh->b_next_transaction == NULL) {
+               __jbd2_journal_unfile_buffer(jh);
+               return;
+       }
+
+       /*
+        * It has been modified by a later transaction: add it to the new
+        * transaction's metadata list.
+        */
+
+       was_dirty = test_clear_buffer_jbddirty(bh);
+       __jbd2_journal_temp_unlink_buffer(jh);
+       jh->b_transaction = jh->b_next_transaction;
+       jh->b_next_transaction = NULL;
+       __jbd2_journal_file_buffer(jh, jh->b_transaction,
+                               was_dirty ? BJ_Metadata : BJ_Reserved);
+       J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
+
+       if (was_dirty)
+               set_buffer_jbddirty(bh);
+}
+
+/*
+ * For the unlocked version of this call, also make sure that any
+ * hanging journal_head is cleaned up if necessary.
+ *
+ * __jbd2_journal_refile_buffer is usually called as part of a single locked
+ * operation on a buffer_head, in which the caller is probably going to
+ * be hooking the journal_head onto other lists.  In that case it is up
+ * to the caller to remove the journal_head if necessary.  For the
+ * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
+ * doing anything else to the buffer so we need to do the cleanup
+ * ourselves to avoid a jh leak.
+ *
+ * *** The journal_head may be freed by this call! ***
+ */
+void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
+{
+       struct buffer_head *bh = jh2bh(jh);
+
+       jbd_lock_bh_state(bh);
+       spin_lock(&journal->j_list_lock);
+
+       __jbd2_journal_refile_buffer(jh);
+       jbd_unlock_bh_state(bh);
+       jbd2_journal_remove_journal_head(bh);
+
+       spin_unlock(&journal->j_list_lock);
+       __brelse(bh);
+}
index 6de374513c010790ab15e40f4103781f669d93c8..bc4b8106a49010bf8144bfbe3a0e6532860eacb4 100644 (file)
@@ -334,10 +334,10 @@ static int __init init_jffs2_fs(void)
           which means just 'no padding', without the alignment
           thing. But GCC doesn't have that -- we have to just
           hope the structs are the right sizes, instead. */
-       BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
-       BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
-       BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
-       BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
+       BUILD_BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
+       BUILD_BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
+       BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
+       BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
 
        printk(KERN_INFO "JFFS2 version 2.2."
 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
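
Converting these checks from BUG_ON() to BUILD_BUG_ON() turns a mount-time panic into a build failure whenever the on-disk structs are mis-sized. A minimal userspace model of a negative-array-size compile-time assertion in the same spirit (the macro name is made up; the kernel's BUILD_BUG_ON in <linux/kernel.h> uses a similar trick):

#include <stdint.h>

/* Fails to compile when cond is true: the array would get a negative size. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct on_disk_rec {
        uint32_t magic;
        uint32_t len;
        uint32_t crc;
} __attribute__((packed));

int main(void)
{
        MY_BUILD_BUG_ON(sizeof(struct on_disk_rec) != 12);  /* compiles */
        /* MY_BUILD_BUG_ON(sizeof(struct on_disk_rec) != 16);  would not */
        return 0;
}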
index fa370f6eb07b27ddf2b1dc2b984ed5633b3d7c82..399ad11b97bebed99e7a1487a0dab9986d0144ad 100644 (file)
@@ -96,7 +96,7 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now check for conflicting locks */
        resp->status = nlmsvc_testlock(file, &argp->lock, &resp->lock);
@@ -126,7 +126,7 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
 #if 0
        /* If supplied state doesn't match current state, we assume it's
@@ -169,7 +169,7 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Try to cancel request. */
        resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
@@ -202,7 +202,7 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now try to remove the lock */
        resp->status = nlmsvc_unlock(file, &argp->lock);
@@ -339,7 +339,7 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now try to create the share */
        resp->status = nlmsvc_share_file(host, file, argp);
@@ -372,7 +372,7 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now try to lock the file */
        resp->status = nlmsvc_unshare_file(host, file, argp);
index 75b2c81bcb93c01782710b7a36fc501aea42ca9a..6a931f4ab75cacda0398e637f38cb2c3116f7c46 100644 (file)
@@ -59,7 +59,7 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
        struct nlm_host         *host = NULL;
        struct nlm_file         *file = NULL;
        struct nlm_lock         *lock = &argp->lock;
-       u32                     error;
+       u32                     error = 0;
 
        /* nfsd callbacks must have been installed for this procedure */
        if (!nlmsvc_ops)
@@ -88,6 +88,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
 no_locks:
        if (host)
                nlm_release_host(host);
+       if (error)
+               return error;
        return nlm_lck_denied_nolocks;
 }
 
@@ -122,7 +124,7 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now check for conflicting locks */
        resp->status = cast_status(nlmsvc_testlock(file, &argp->lock, &resp->lock));
@@ -153,7 +155,7 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
 #if 0
        /* If supplied state doesn't match current state, we assume it's
@@ -196,7 +198,7 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Try to cancel request. */
        resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
@@ -229,7 +231,7 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now try to remove the lock */
        resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
@@ -368,7 +370,7 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now try to create the share */
        resp->status = cast_status(nlmsvc_share_file(host, file, argp));
@@ -401,7 +403,7 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
 
        /* Obtain client and file */
        if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
-               return rpc_success;
+               return resp->status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
 
        /* Now try to unshare the file */
        resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
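
The same drop-reply mapping is now repeated verbatim in every lockd service procedure above. A hypothetical refactor could centralize the rule in one helper; the sketch below is a userspace model with invented numeric values for the status codes, purely to show the shape:

#include <stdio.h>

/* Invented numeric stand-ins; the real codes live in the sunrpc headers. */
#define nlm_drop_reply   99
#define rpc_drop_reply   -2
#define rpc_success       0

/* Hypothetical helper: one place for the drop-reply rule. */
static int nlm_map_status(int status)
{
        return status == nlm_drop_reply ? rpc_drop_reply : rpc_success;
}

int main(void)
{
        printf("%d %d\n", nlm_map_status(nlm_drop_reply), nlm_map_status(7));
        return 0;
}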
index 514f5f20701ea3d3cc8ba0410dd8fd3098e71c87..7dac96e6c82c666f93d69743d74752dc08e69848 100644 (file)
@@ -135,12 +135,6 @@ out_unlock:
 
 out_free:
        kfree(file);
-#ifdef CONFIG_LOCKD_V4
-       if (nfserr == 1)
-               nfserr = nlm4_stale_fh;
-       else
-#endif
-       nfserr = nlm_lck_denied;
        goto out_unlock;
 }
 
@@ -324,7 +318,16 @@ nlmsvc_same_host(struct nlm_host *host, struct nlm_host *other)
 static int
 nlmsvc_is_client(struct nlm_host *host, struct nlm_host *dummy)
 {
-       return host->h_server;
+       if (host->h_server) {
+               /* we are destroying locks even though the client
+                * hasn't asked us to, so don't unmonitor the
+                * client
+                */
+               if (host->h_nsmhandle)
+                       host->h_nsmhandle->sm_sticky = 1;
+               return 1;
+       } else
+               return 0;
 }
 
 /*
index c11a4b9fb863c5e10ae342b5637dd1ddd4dc1145..1e36bae4d0eb1a4d8a8b17140c3216fad2d5c81b 100644 (file)
@@ -149,12 +149,8 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
                return -ENOMEM;
        s->s_fs_info = sbi;
 
-       /* N.B. These should be compile-time tests.
-          Unfortunately that is impossible. */
-       if (32 != sizeof (struct minix_inode))
-               panic("bad V1 i-node size");
-       if (64 != sizeof(struct minix2_inode))
-               panic("bad V2 i-node size");
+       BUILD_BUG_ON(32 != sizeof (struct minix_inode));
+       BUILD_BUG_ON(64 != sizeof(struct minix2_inode));
 
        if (!sb_set_blocksize(s, BLOCK_SIZE))
                goto out_bad_hblock;
index 7b889ff15ae63a96ef47be64172c7d360ef2d61a..9b9e7e127c03cadd442909c87b6a2012ebbd6432 100644 (file)
@@ -39,18 +39,20 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
        fh_put(&fh);
        rqstp->rq_client = NULL;
        exp_readunlock();
-       /* nlm and nfsd don't share error codes.
-        * we invent: 0 = no error
-        *            1 = stale file handle
-        *            2 = other error
+       /* We return nlm error codes as nlm doesn't know
+        * about nfsd, but nfsd does know about nlm.
         */
        switch (nfserr) {
        case nfs_ok:
                return 0;
+       case nfserr_dropit:
+               return nlm_drop_reply;
+#ifdef CONFIG_LOCKD_V4
        case nfserr_stale:
-               return 1;
+               return nlm4_stale_fh;
+#endif
        default:
-               return 2;
+               return nlm_lck_denied;
        }
 }
 
index f6ca9fb3fc63fb7b78870bd387d8844c648e5b57..324a278f280832dccdc7ab2e53a49802931682a7 100644 (file)
@@ -421,7 +421,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
 
        /* Create RPC client */
        cb->cb_client = rpc_create(&args);
-       if (!cb->cb_client) {
+       if (IS_ERR(cb->cb_client)) {
                dprintk("NFSD: couldn't create callback client\n");
                goto out_err;
        }
@@ -448,10 +448,10 @@ nfsd4_probe_callback(struct nfs4_client *clp)
 out_rpciod:
        atomic_dec(&clp->cl_count);
        rpciod_down();
-       cb->cb_client = NULL;
 out_clnt:
        rpc_shutdown_client(cb->cb_client);
 out_err:
+       cb->cb_client = NULL;
        dprintk("NFSD: warning: no callback path to client %.*s\n",
                (int)clp->cl_name.len, clp->cl_name.data);
 }
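
This fix matters because rpc_create() never returns NULL on failure; it returns an ERR_PTR-encoded errno, so the old NULL test silently accepted an error pointer. A compact userspace model of the encode/test/decode trio, simplified from the kernel's <linux/err.h>:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Errors are encoded as the top 4095 values of the address space. */
static inline void *ERR_PTR(long error)    { return (void *)error; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *client = ERR_PTR(-ECONNREFUSED); /* simulate rpc_create() failing */

        if (client != NULL)
                printf("a NULL check passes: the error goes unnoticed\n");
        if (IS_ERR(client))
                printf("IS_ERR catches it: errno %ld\n", -PTR_ERR(client));
        return 0;
}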
index 8333db12caca56207a0142fcb9a94000f962ad69..d1fac6872c44e732728e21dd66fdbcbc02d14bf1 100644 (file)
@@ -68,20 +68,20 @@ fh_dup2(struct svc_fh *dst, struct svc_fh *src)
 }
 
 static int
-do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
+do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open, int accmode)
 {
-       int accmode, status;
+       int status;
 
        if (open->op_truncate &&
                !(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
                return nfserr_inval;
 
-       accmode = MAY_NOP;
        if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
-               accmode = MAY_READ;
-       if (open->op_share_deny & NFS4_SHARE_ACCESS_WRITE)
+               accmode |= MAY_READ;
+       if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
                accmode |= (MAY_WRITE | MAY_TRUNC);
-       accmode |= MAY_OWNER_OVERRIDE;
+       if (open->op_share_deny & NFS4_SHARE_DENY_WRITE)
+               accmode |= MAY_WRITE;
 
        status = fh_verify(rqstp, current_fh, S_IFREG, accmode);
 
@@ -124,7 +124,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
                                &resfh.fh_handle.fh_base,
                                resfh.fh_handle.fh_size);
 
-               status = do_open_permission(rqstp, current_fh, open);
+               status = do_open_permission(rqstp, current_fh, open, MAY_NOP);
        }
 
        fh_put(&resfh);
@@ -155,7 +155,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
        open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
                (open->op_iattr.ia_size == 0);
 
-       status = do_open_permission(rqstp, current_fh, open);
+       status = do_open_permission(rqstp, current_fh, open, MAY_OWNER_OVERRIDE);
 
        return status;
 }
index 4c29cd7cc8e6e22f876daf0116f5bafd32ddca8e..76b46ebbb10c1b36f17b11879f329b77433ec825 100644 (file)
@@ -339,7 +339,7 @@ static unsigned long long ocfs2_max_file_offset(unsigned int blockshift)
 
 #if BITS_PER_LONG == 32
 # if defined(CONFIG_LBD)
-       BUG_ON(sizeof(sector_t) != 8);
+       BUILD_BUG_ON(sizeof(sector_t) != 8);
        pagefactor = PAGE_CACHE_SIZE;
        bitshift = BITS_PER_LONG;
 # else
index 51c6a748df4921947659abb114fc189f2508fa95..6fb4b6150d7701cd57085f80a0f9222f31be53ed 100644 (file)
@@ -376,18 +376,48 @@ static char *make_block_name(struct gendisk *disk)
        return name;
 }
 
-static void disk_sysfs_symlinks(struct gendisk *disk)
+static int disk_sysfs_symlinks(struct gendisk *disk)
 {
        struct device *target = get_device(disk->driverfs_dev);
+       int err;
+       char *disk_name = NULL;
+
        if (target) {
-               char *disk_name = make_block_name(disk);
-               sysfs_create_link(&disk->kobj,&target->kobj,"device");
-               if (disk_name) {
-                       sysfs_create_link(&target->kobj,&disk->kobj,disk_name);
-                       kfree(disk_name);
+               disk_name = make_block_name(disk);
+               if (!disk_name) {
+                       err = -ENOMEM;
+                       goto err_out;
                }
+
+               err = sysfs_create_link(&disk->kobj, &target->kobj, "device");
+               if (err)
+                       goto err_out_disk_name;
+
+               err = sysfs_create_link(&target->kobj, &disk->kobj, disk_name);
+               if (err)
+                       goto err_out_dev_link;
        }
-       sysfs_create_link(&disk->kobj, &block_subsys.kset.kobj, "subsystem");
+
+       err = sysfs_create_link(&disk->kobj, &block_subsys.kset.kobj,
+                               "subsystem");
+       if (err)
+               goto err_out_disk_name_lnk;
+
+       kfree(disk_name);
+
+       return 0;
+
+err_out_disk_name_lnk:
+       if (target) {
+               sysfs_remove_link(&target->kobj, disk_name);
+err_out_dev_link:
+               sysfs_remove_link(&disk->kobj, "device");
+err_out_disk_name:
+               kfree(disk_name);
+err_out:
+               put_device(target);
+       }
+       return err;
 }
 
 /* Not exported, helper to add_disk(). */
@@ -406,7 +436,11 @@ void register_disk(struct gendisk *disk)
                *s = '!';
        if ((err = kobject_add(&disk->kobj)))
                return;
-       disk_sysfs_symlinks(disk);
+       err = disk_sysfs_symlinks(disk);
+       if (err) {
+               kobject_del(&disk->kobj);
+               return;
+       }
        disk_sysfs_add_subdirs(disk);
 
        /* No minors to use for partitions */
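
disk_sysfs_symlinks() now unwinds with a goto ladder, each label undoing exactly the steps that had succeeded before the failure point. The general shape of that idiom, as a standalone sketch with invented stand-in resources:

#include <stdlib.h>

/* Staged setup with goto-based unwind: each label undoes one earlier step. */
static int setup_two_resources(char **pa, char **pb)
{
        char *a, *b;
        int err = -1;

        a = malloc(16);                 /* step 1 */
        if (!a)
                goto err_out;
        b = malloc(16);                 /* step 2 */
        if (!b)
                goto err_free_a;

        *pa = a;
        *pb = b;
        return 0;

err_free_a:
        free(a);                        /* undo step 1 */
err_out:
        return err;
}

int main(void)
{
        char *a, *b;

        if (setup_two_resources(&a, &b) == 0) {
                free(b);
                free(a);
        }
        return 0;
}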
index 82da55b5cffef804f4528bf1bed4d94645f7df2d..26a8f8416b79895af81b2af3a202ddc3dc17b020 100644 (file)
@@ -86,7 +86,7 @@
 
 
 /* Worst case buffer size needed for holding an integer. */
-#define PROC_NUMBUF 10
+#define PROC_NUMBUF 13
 
 struct pid_entry {
        int len;
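
PROC_NUMBUF sizes the scratch buffer used to format a pid or similar integer as text, and 10 was too small: the widest signed int, -2147483648, already needs 11 characters, and with a trailing newline plus NUL terminator 13 bytes is exactly enough, which is presumably the new value's rationale. A quick standalone check of the digit count:

#include <stdio.h>
#include <limits.h>

int main(void)
{
        /* snprintf(NULL, 0, ...) returns the formatted length, sans NUL. */
        int len = snprintf(NULL, 0, "%d", INT_MIN);

        printf("INT_MIN is %d chars; +newline +NUL = %d bytes\n",
               len, len + 2);
        return 0;
}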
index c89aa2338191af86e6299496e18882756ccb78d2..9041802df83216f4cb6951d1e5cac7fd30f56743 100644 (file)
@@ -430,20 +430,29 @@ int remove_save_link(struct inode *inode, int truncate)
        return journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT);
 }
 
-static void reiserfs_put_super(struct super_block *s)
+static void reiserfs_kill_sb(struct super_block *s)
 {
-       struct reiserfs_transaction_handle th;
-       th.t_trans_id = 0;
+       if (REISERFS_SB(s)) {
+               if (REISERFS_SB(s)->xattr_root) {
+                       d_invalidate(REISERFS_SB(s)->xattr_root);
+                       dput(REISERFS_SB(s)->xattr_root);
+                       REISERFS_SB(s)->xattr_root = NULL;
+               }
 
-       if (REISERFS_SB(s)->xattr_root) {
-               d_invalidate(REISERFS_SB(s)->xattr_root);
-               dput(REISERFS_SB(s)->xattr_root);
+               if (REISERFS_SB(s)->priv_root) {
+                       d_invalidate(REISERFS_SB(s)->priv_root);
+                       dput(REISERFS_SB(s)->priv_root);
+                       REISERFS_SB(s)->priv_root = NULL;
+               }
        }
 
-       if (REISERFS_SB(s)->priv_root) {
-               d_invalidate(REISERFS_SB(s)->priv_root);
-               dput(REISERFS_SB(s)->priv_root);
-       }
+       kill_block_super(s);
+}
+
+static void reiserfs_put_super(struct super_block *s)
+{
+       struct reiserfs_transaction_handle th;
+       th.t_trans_id = 0;
 
        /* change file system state to current state if it was mounted with read-write permissions */
        if (!(s->s_flags & MS_RDONLY)) {
@@ -2156,7 +2165,7 @@ struct file_system_type reiserfs_fs_type = {
        .owner = THIS_MODULE,
        .name = "reiserfs",
        .get_sb = get_super_block,
-       .kill_sb = kill_block_super,
+       .kill_sb = reiserfs_kill_sb,
        .fs_flags = FS_REQUIRES_DEV,
 };
 
index 13e92dd19fbb1b9165f6cd05e02128c2a0a39cd8..a567010b62ac52e84099f584908629ceae806128 100644 (file)
@@ -607,7 +607,7 @@ find_page:
                        ret = -ENOMEM;
                        page = page_cache_alloc_cold(mapping);
                        if (unlikely(!page))
-                               goto out_nomem;
+                               goto out_ret;
 
                        /*
                         * This will also lock the page
@@ -666,7 +666,7 @@ find_page:
                if (sd->pos + this_len > isize)
                        vmtruncate(mapping->host, isize);
 
-               goto out;
+               goto out_ret;
        }
 
        if (buf->page != page) {
@@ -698,7 +698,7 @@ find_page:
 out:
        page_cache_release(page);
        unlock_page(page);
-out_nomem:
+out_ret:
        return ret;
 }
 
index aec99ddbe53f726a526d4cd0b02abb9f55179e0b..47e554c12e768bc0c5b0ad10b10d3a3a92a90e46 100644 (file)
@@ -260,17 +260,17 @@ int fsync_super(struct super_block *sb)
  *     that need destruction out of superblock, call generic_shutdown_super()
  *     and release aforementioned objects.  Note: dentries and inodes _are_
  *     taken care of and do not need specific handling.
+ *
+ *     Upon calling this function, the filesystem may no longer alter or
+ *     rearrange the set of dentries belonging to this super_block, nor may it
+ *     change the attachments of dentries to inodes.
  */
 void generic_shutdown_super(struct super_block *sb)
 {
-       struct dentry *root = sb->s_root;
        struct super_operations *sop = sb->s_op;
 
-       if (root) {
-               sb->s_root = NULL;
-               shrink_dcache_parent(root);
-               shrink_dcache_sb(sb);
-               dput(root);
+       if (sb->s_root) {
+               shrink_dcache_for_umount(sb);
                fsync_super(sb);
                lock_super(sb);
                sb->s_flags &= ~MS_ACTIVE;
index 350cba5d68034f8dd4bdbb2732e2d5bc0cc17f9a..dc9e7dc07fb7d1be9a8c899f940cccd2fb2000be 100644 (file)
@@ -358,16 +358,11 @@ static int sysv_fill_super(struct super_block *sb, void *data, int silent)
        unsigned long blocknr;
        int size = 0, i;
        
-       if (1024 != sizeof (struct xenix_super_block))
-               panic("Xenix FS: bad superblock size");
-       if (512 != sizeof (struct sysv4_super_block))
-               panic("SystemV FS: bad superblock size");
-       if (512 != sizeof (struct sysv2_super_block))
-               panic("SystemV FS: bad superblock size");
-       if (500 != sizeof (struct coh_super_block))
-               panic("Coherent FS: bad superblock size");
-       if (64 != sizeof (struct sysv_inode))
-               panic("sysv fs: bad inode size");
+       BUILD_BUG_ON(1024 != sizeof (struct xenix_super_block));
+       BUILD_BUG_ON(512 != sizeof (struct sysv4_super_block));
+       BUILD_BUG_ON(512 != sizeof (struct sysv2_super_block));
+       BUILD_BUG_ON(500 != sizeof (struct coh_super_block));
+       BUILD_BUG_ON(64 != sizeof (struct sysv_inode));
 
        sbi = kzalloc(sizeof(struct sysv_sb_info), GFP_KERNEL);
        if (!sbi)
index c5472be6f3a2c41744471e61dafa0386bf038397..e72bfdd887f9b285ccbb3530be8096d61aeb74a8 100644 (file)
@@ -13,6 +13,7 @@
 #define ACPI_PDC_SMP_C_SWCOORD         (0x0040)
 #define ACPI_PDC_SMP_T_SWCOORD         (0x0080)
 #define ACPI_PDC_C_C1_FFH              (0x0100)
+#define ACPI_PDC_C_C2C3_FFH            (0x0200)
 
 #define ACPI_PDC_EST_CAPABILITY_SMP    (ACPI_PDC_SMP_C1PT | \
                                         ACPI_PDC_C_C1_HALT | \
                                         ACPI_PDC_SMP_P_SWCOORD | \
                                         ACPI_PDC_P_FFH)
 
-#define ACPI_PDC_C_CAPABILITY_SMP      (ACPI_PDC_SMP_C2C3 | \
-                                        ACPI_PDC_SMP_C1PT | \
-                                        ACPI_PDC_C_C1_HALT)
+#define ACPI_PDC_C_CAPABILITY_SMP      (ACPI_PDC_SMP_C2C3  | \
+                                        ACPI_PDC_SMP_C1PT  | \
+                                        ACPI_PDC_C_C1_HALT | \
+                                        ACPI_PDC_C_C1_FFH  | \
+                                        ACPI_PDC_C_C2C3_FFH)
 
 #endif                         /* __PDC_INTEL_H__ */
index 9dd5b75961f84c19ba9a731eb071f15950577d75..7798d2a9f793aaab8005c2a45d3362a35029ea5b 100644 (file)
@@ -29,6 +29,9 @@
 #define DOMAIN_COORD_TYPE_SW_ANY       0xfd
 #define DOMAIN_COORD_TYPE_HW_ALL       0xfe
 
+#define ACPI_CSTATE_SYSTEMIO   (0)
+#define ACPI_CSTATE_FFH                (1)
+
 /* Power Management */
 
 struct acpi_processor_cx;
@@ -58,6 +61,8 @@ struct acpi_processor_cx {
        u8 valid;
        u8 type;
        u32 address;
+       u8 space_id;
+       u8 index;
        u32 latency;
        u32 latency_ticks;
        u32 power;
@@ -206,6 +211,9 @@ void arch_acpi_processor_init_pdc(struct acpi_processor *pr);
 #ifdef ARCH_HAS_POWER_INIT
 void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
                                        unsigned int cpu);
+int acpi_processor_ffh_cstate_probe(unsigned int cpu,
+               struct acpi_processor_cx *cx, struct acpi_power_register *reg);
+void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate);
 #else
 static inline void acpi_processor_power_init_bm_check(struct
                                                      acpi_processor_flags
@@ -214,6 +222,16 @@ static inline void acpi_processor_power_init_bm_check(struct
        flags->bm_check = 1;
        return;
 }
+static inline int acpi_processor_ffh_cstate_probe(unsigned int cpu,
+               struct acpi_processor_cx *cx, struct acpi_power_register *reg)
+{
+       return -1;
+}
+static inline void acpi_processor_ffh_cstate_enter(
+               struct acpi_processor_cx *cstate)
+{
+       return;
+}
 #endif
 
 /* in processor_perflib.c */
index f5ae98c25d1f4f0f4fb2627b8916ec1648ce20fd..5d15af24573b9f90fa0e6f9bb6dc12bd6363a241 100644 (file)
@@ -533,19 +533,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
 #define eth_io_copy_and_sum(skb,src,len,unused) \
   memcpy_fromio((skb)->data,src,len)
 
-static inline int
-check_signature(const volatile void __iomem *io_addr,
-               const unsigned char *signature, int length)
-{
-       do {
-               if (readb(io_addr) != *signature)
-                       return 0;
-               io_addr++;
-               signature++;
-       } while (--length);
-       return 1;
-}
-
 /*
  * The Alpha Jensen hardware for some rather strange reason puts
  * the RTC clock at 0x170 instead of 0x70. Probably due to some
index 41c1bee342ad0ba04c5a4644a2e2c291c79ff78d..edc06598d187d4664f891f519d98a59b8669b924 100644 (file)
@@ -28,8 +28,8 @@
 /*
  * PCI space virtual addresses
  */
-#define VERSATILE_PCI_VIRT_BASE                0xe8000000
-#define VERSATILE_PCI_CFG_VIRT_BASE    0xe9000000
+#define VERSATILE_PCI_VIRT_BASE                (void __iomem *)0xe8000000ul
+#define VERSATILE_PCI_CFG_VIRT_BASE    (void __iomem *)0xe9000000ul
 
 #if 0
 #define VERSATILE_PCI_VIRT_MEM_BASE0   0xf4000000
index 34aaaac4f6177be0feabab55bfe1f3e7869d8286..ae999fd5dc679e74108bd81678e2eeea1e90c11c 100644 (file)
@@ -193,23 +193,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define eth_io_copy_and_sum(s,c,l,b) \
                                eth_copy_and_sum((s),__mem_pci(c),(l),(b))
 
-static inline int
-check_signature(void __iomem *io_addr, const unsigned char *signature,
-               int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 #elif !defined(readb)
 
 #define readb(c)                       (__readwrite_bug("readb"),0)
index 87aba57a66c40d5b0f10fe199c3159fa5710a9e3..09ad0cab90149f3a7a17d755f92c8742ad0841a8 100644 (file)
@@ -110,7 +110,7 @@ extern int __get_user_4(void *);
 #define get_user(x,p)                                                  \
        ({                                                              \
                const register typeof(*(p)) __user *__p asm("r0") = (p);\
-               register unsigned int __r2 asm("r2");                   \
+               register unsigned long __r2 asm("r2");                  \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
diff --git a/include/asm-avr32/irq_regs.h b/include/asm-avr32/irq_regs.h
new file mode 100644 (file)
index 0000000..3dd9c0b
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
index e2247c22a638aec316a72383d7bc9f77e191dcb6..0f390f41f81680a70c12fe26cdae314644466344 100644 (file)
@@ -82,11 +82,11 @@ extern struct page *kmap_atomic_to_page(void *ptr);
        dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V;             \
                                                                                                \
        if (type != __KM_CACHE)                                                                 \
-               asm volatile("movgs %0,dampr"#ampr :: "r"(dampr));                              \
+               asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory");                   \
        else                                                                                    \
                asm volatile("movgs %0,iampr"#ampr"\n"                                          \
                             "movgs %0,dampr"#ampr"\n"                                          \
-                            :: "r"(dampr)                                                      \
+                            :: "r"(dampr) : "memory"                                           \
                             );                                                                 \
                                                                                                \
        asm("movsg damlr"#ampr",%0" : "=r"(damlr));                                             \
@@ -104,7 +104,7 @@ extern struct page *kmap_atomic_to_page(void *ptr);
        asm volatile("movgs %0,tplr \n"                                                           \
                     "movgs %1,tppr \n"                                                           \
                     "tlbpr %0,gr0,#2,#1"                                                         \
-                    : : "r"(damlr), "r"(dampr));                                                 \
+                    : : "r"(damlr), "r"(dampr) : "memory");                                      \
                                                                                                  \
        /*printk("TLB: SECN sl=%d L=%08lx P=%08lx\n", slot, damlr, dampr);*/                      \
                                                                                                  \
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
        unsigned long paddr;
 
-       preempt_disable();
+       inc_preempt_count();
        paddr = page_to_phys(page);
 
        switch (type) {
@@ -138,16 +138,16 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
        }
 }
 
-#define __kunmap_atomic_primary(type, ampr)                    \
-do {                                                           \
-       asm volatile("movgs gr0,dampr"#ampr"\n");               \
-       if (type == __KM_CACHE)                                 \
-               asm volatile("movgs gr0,iampr"#ampr"\n");       \
+#define __kunmap_atomic_primary(type, ampr)                            \
+do {                                                                   \
+       asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory");          \
+       if (type == __KM_CACHE)                                         \
+               asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory");  \
 } while(0)
 
-#define __kunmap_atomic_secondary(slot, vaddr)                 \
-do {                                                           \
-       asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr));      \
+#define __kunmap_atomic_secondary(slot, vaddr)                         \
+do {                                                                   \
+       asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");   \
 } while(0)
 
 static inline void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -170,7 +170,8 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
        default:
                BUG();
        }
-       preempt_enable();
+       dec_preempt_count();
+       preempt_check_resched();
 }
 
 #endif /* !__ASSEMBLY__ */
index 7765f5528894009886a62e647a08d8eeb96fc1fa..20e44fe00abf66f6b74b0daf482bea26d1dcebc7 100644 (file)
@@ -385,27 +385,6 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
  */
 #define xlate_dev_kmem_ptr(p)  p
 
-/*
- * Check BIOS signature
- */
-static inline int check_signature(volatile void __iomem *io_addr,
-                                 const unsigned char *signature, int length)
-{
-       int retval = 0;
-
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-
-       retval = 1;
-out:
-       return retval;
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_IO_H */
index 5ef93a4d009fd6e43e506b5b3e9febc0e27ecaba..815bb01480601f7ca1a96e81113fb030df89062d 100644 (file)
@@ -15,7 +15,7 @@ static inline int sched_find_first_bit(const unsigned long *b)
 #if BITS_PER_LONG == 64
        if (unlikely(b[0]))
                return __ffs(b[0]);
-       if (unlikely(b[1]))
+       if (likely(b[1]))
                return __ffs(b[1]) + 64;
        return __ffs(b[2]) + 128;
 #elif BITS_PER_LONG == 32
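
The unlikely() to likely() flip on b[1] reflects where runnable tasks usually sit in the O(1) scheduler's 140-bit priority bitmap: on 64-bit, b[0] holds real-time priorities 0-63, while ordinary timeshare priorities (100-139, default 120) mostly land in b[1], making it the common hit. Both hints compile down to __builtin_expect; a small standalone illustration follows, with an invented wrapper function:

#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Invented wrapper mirroring the sched_find_first_bit() word scan. */
static int first_set_word(const unsigned long *b)
{
        if (unlikely(b[0]))     /* real-time tasks: rare on most boxes */
                return 0;
        if (likely(b[1]))       /* timeshare priorities: the common case */
                return 1;
        return 2;
}

int main(void)
{
        unsigned long bits[3] = { 0, 1UL << 56, 0 };    /* e.g. priority 120 */
        printf("first non-zero word: %d\n", first_set_word(bits));
        return 0;
}

The hints only shape branch layout; the function returns the same result either way.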
index b3724fe93ff1008111dad9d1fc993defdafdda11..68df0dc3ab8ff3e52379a0e6a0afda589c9346b1 100644 (file)
@@ -224,33 +224,6 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
 
 #define eth_io_copy_and_sum(a,b,c,d)           eth_copy_and_sum((a),(void __force *)(b),(c),(d))
 
-/**
- *     check_signature         -       find BIOS signatures
- *     @io_addr: mmio address to check 
- *     @signature:  signature block
- *     @length: length of signature
- *
- *     Perform a signature comparison with the mmio address io_addr. This
- *     address should have been obtained by ioremap.
- *     Returns 1 on a match.
- */
-static inline int check_signature(volatile void __iomem * io_addr,
-       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 /*
  *     Cache management
  *
index 2277127696d264579687a2e693768a6e4d8ba206..e0ddca94d50c8f42c71e05e1796086fdf84ad0ef 100644 (file)
@@ -306,6 +306,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
                : :"a" (eax), "c" (ecx));
 }
 
+extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
+
 /* from system description table in BIOS.  Mostly for MCA use, but
 others may find it useful. */
 extern unsigned int machine_id;
index 54d905ebc63dd9738e00f592ee0787b371ef1e62..eef5133b9ce2a03b73b5f4c73f0e771a12b52aa8 100644 (file)
@@ -404,20 +404,6 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero(void *to,
  * anything, so this is accurate.
  */
 
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
@@ -439,35 +425,27 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
        return __copy_to_user_ll(to, from, n);
 }
 
-static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       might_sleep();
-       return __copy_to_user_inatomic(to, from, n);
-}
-
 /**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
  * Context: User context only.  This function may sleep.
  *
- * Copy data from user space to kernel space.  Caller must check
+ * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
  *
  * Returns number of bytes that could not be copied.
  * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - may be called from
- * atomic context and will fail rather than sleep.  In this case the
- * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
- * for explanation of why this is needed.
  */
+static __always_inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       might_sleep();
+       return __copy_to_user_inatomic(to, from, n);
+}
+
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
@@ -493,6 +471,29 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
        }
        return __copy_from_user_ll_nozero(to, from, n);
 }
+
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only.  This function may sleep.
+ *
+ * Copy data from user space to kernel space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ *
+ * An alternate version - __copy_from_user_inatomic() - may be called from
+ * atomic context and will fail rather than sleep.  In this case the
+ * uncopied bytes will *NOT* be padded with zeros.  See fs/filemap.h
+ * for explanation of why this is needed.
+ */
 static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
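
    [The reshuffle above only moves the kerneldoc blocks so each sits on the sleeping
    variant it actually documents, rather than on the _inatomic helper that happened to
    be defined first; the callable contract is unchanged. A short usage sketch of that
    contract (buffer names hypothetical):

	/* caller validates once, then may use the "less checking" variants */
	if (!access_ok(VERIFY_WRITE, ubuf, len))
		return -EFAULT;
	if (__copy_to_user(ubuf, kbuf, len))	/* may sleep; returns bytes NOT copied */
		return -EFAULT;
    ]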
index 3ca7ab963d7d6ad2f152bd927314e15639223d29..beeeaf6b054a178db754757b6b9547fa784725d3 100644 (file)
 #define __NR_vmsplice          316
 #define __NR_move_pages                317
 #define __NR_getcpu            318
+#define __NR_epoll_pwait       319
 
 #ifdef __KERNEL__
 
-#define NR_syscalls 319
+#define NR_syscalls 320
 #include <linux/err.h>
 
 /*
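
    [__NR_epoll_pwait (319) wires up the new epoll_pwait() syscall, which atomically
    swaps the signal mask around an epoll wait. A hedged userspace sketch via syscall(2),
    from before glibc grew a wrapper; epfd is a hypothetical epoll descriptor:

	sigset_t block;
	struct epoll_event ev;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	/* last arg is the *kernel* sigset size (_NSIG/8 == 8), not sizeof(sigset_t) */
	int n = syscall(__NR_epoll_pwait, epfd, &ev, 1, -1, &block, 8);
    ]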
index 4abfcfb91eb8446cf439af7df1cdd8aa7582375e..53100f35361280f31e6fcf1cdd60542b0d9cb191 100644 (file)
@@ -58,4 +58,4 @@ static const int VIC_CPI_Registers[] =
 
 #define VIC_BOOT_INTERRUPT_MASK                0xfe
 
-extern void smp_vic_timer_interrupt(struct pt_regs *regs);
+extern void smp_vic_timer_interrupt(void);
index e74c54aa757f8088e1b7fd65af3c298f92f15947..5b27838905b234fe2991b0f5c90c217a9552d55a 100644 (file)
@@ -118,33 +118,33 @@ typedef struct voyager_module {
 } voyager_module_t;
 
 typedef struct voyager_eeprom_hdr {
-        __u8  module_id[4] __attribute__((packed)); 
-        __u8  version_id __attribute__((packed));
-        __u8  config_id __attribute__((packed)); 
-        __u16 boundry_id __attribute__((packed));      /* boundary scan id */
-        __u16 ee_size __attribute__((packed));         /* size of EEPROM */
-        __u8  assembly[11] __attribute__((packed));    /* assembly # */
-        __u8  assembly_rev __attribute__((packed));    /* assembly rev */
-        __u8  tracer[4] __attribute__((packed));       /* tracer number */
-        __u16 assembly_cksum __attribute__((packed));  /* asm checksum */
-        __u16 power_consump __attribute__((packed));   /* pwr requirements */
-        __u16 num_asics __attribute__((packed));       /* number of asics */
-        __u16 bist_time __attribute__((packed));       /* min. bist time */
-        __u16 err_log_offset __attribute__((packed));  /* error log offset */
-        __u16 scan_path_offset __attribute__((packed));/* scan path offset */
-        __u16 cct_offset __attribute__((packed));
-        __u16 log_length __attribute__((packed));      /* length of err log */
-        __u16 xsum_end __attribute__((packed));        /* offset to end of
+        __u8  module_id[4];
+        __u8  version_id;
+        __u8  config_id;
+        __u16 boundry_id;      /* boundary scan id */
+        __u16 ee_size;         /* size of EEPROM */
+        __u8  assembly[11];    /* assembly # */
+        __u8  assembly_rev;    /* assembly rev */
+        __u8  tracer[4];       /* tracer number */
+        __u16 assembly_cksum;  /* asm checksum */
+        __u16 power_consump;   /* pwr requirements */
+        __u16 num_asics;       /* number of asics */
+        __u16 bist_time;       /* min. bist time */
+        __u16 err_log_offset;  /* error log offset */
+        __u16 scan_path_offset;/* scan path offset */
+        __u16 cct_offset;
+        __u16 log_length;      /* length of err log */
+        __u16 xsum_end;        /* offset to end of
                                                           checksum */
-        __u8  reserved[4] __attribute__((packed));
-        __u8  sflag __attribute__((packed));           /* starting sentinal */
-        __u8  part_number[13] __attribute__((packed)); /* prom part number */
-        __u8  version[10] __attribute__((packed));     /* version number */
-        __u8  signature[8] __attribute__((packed));
-        __u16 eeprom_chksum __attribute__((packed));
-        __u32  data_stamp_offset __attribute__((packed));
-        __u8  eflag  __attribute__((packed));           /* ending sentinal */
-} voyager_eprom_hdr_t;
+        __u8  reserved[4];
+        __u8  sflag;           /* starting sentinal */
+        __u8  part_number[13]; /* prom part number */
+        __u8  version[10];     /* version number */
+        __u8  signature[8];
+        __u16 eeprom_chksum;
+        __u32  data_stamp_offset;
+        __u8  eflag ;           /* ending sentinal */
+} __attribute__((packed)) voyager_eprom_hdr_t;
 
 
 
@@ -155,30 +155,30 @@ typedef struct voyager_eeprom_hdr {
  * in the module EPROMs.  We really only care about the IDs and
  * offsets */
 typedef struct voyager_sp_table {
-       __u8 asic_id __attribute__((packed));
-       __u8 bypass_flag __attribute__((packed));
-       __u16 asic_data_offset __attribute__((packed));
-       __u16 config_data_offset __attribute__((packed));
-} voyager_sp_table_t;
+       __u8 asic_id;
+       __u8 bypass_flag;
+       __u16 asic_data_offset;
+       __u16 config_data_offset;
+} __attribute__((packed)) voyager_sp_table_t;
 
 typedef struct voyager_jtag_table {
-       __u8 icode[4] __attribute__((packed));
-       __u8 runbist[4] __attribute__((packed));
-       __u8 intest[4] __attribute__((packed));
-       __u8 samp_preld[4] __attribute__((packed));
-       __u8 ireg_len __attribute__((packed));
-} voyager_jtt_t;
+       __u8 icode[4];
+       __u8 runbist[4];
+       __u8 intest[4];
+       __u8 samp_preld[4];
+       __u8 ireg_len;
+} __attribute__((packed)) voyager_jtt_t;
 
 typedef struct voyager_asic_data_table {
-       __u8 jtag_id[4] __attribute__((packed));
-       __u16 length_bsr __attribute__((packed));
-       __u16 length_bist_reg __attribute__((packed));
-       __u32 bist_clk __attribute__((packed));
-       __u16 subaddr_bits __attribute__((packed));
-       __u16 seed_bits __attribute__((packed));
-       __u16 sig_bits __attribute__((packed));
-       __u16 jtag_offset __attribute__((packed));
-} voyager_at_t;
+       __u8 jtag_id[4];
+       __u16 length_bsr;
+       __u16 length_bist_reg;
+       __u32 bist_clk;
+       __u16 subaddr_bits;
+       __u16 seed_bits;
+       __u16 sig_bits;
+       __u16 jtag_offset;
+} __attribute__((packed)) voyager_at_t;
 
 /* Voyager Interrupt Controller (VIC) registers */
 
@@ -328,52 +328,52 @@ struct voyager_bios_info {
 #define NUMBER_OF_POS_REGS     8
 
 typedef struct {
-       __u8    MC_Slot __attribute__((packed));
-       __u8    POS_Values[NUMBER_OF_POS_REGS] __attribute__((packed));
-} MC_SlotInformation_t;
+       __u8    MC_Slot;
+       __u8    POS_Values[NUMBER_OF_POS_REGS];
+} __attribute__((packed)) MC_SlotInformation_t;
 
 struct QuadDescription {
-       __u8  Type __attribute__((packed));     /* for type 0 (DYADIC or MONADIC) all fields
+       __u8  Type;     /* for type 0 (DYADIC or MONADIC) all fields
                          * will be zero except for slot */
-       __u8 StructureVersion __attribute__((packed));
-       __u32 CPI_BaseAddress __attribute__((packed));
-       __u32  LARC_BankSize __attribute__((packed));   
-       __u32 LocalMemoryStateBits __attribute__((packed));
-       __u8  Slot __attribute__((packed)); /* Processor slots 1 - 4 */
-}
+       __u8 StructureVersion;
+       __u32 CPI_BaseAddress;
+       __u32  LARC_BankSize;
+       __u32 LocalMemoryStateBits;
+       __u8  Slot; /* Processor slots 1 - 4 */
+} __attribute__((packed));
 
 struct ProcBoardInfo { 
-       __u8 Type __attribute__((packed));    
-       __u8 StructureVersion __attribute__((packed));
-       __u8 NumberOfBoards __attribute__((packed));
-       struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS] __attribute__((packed));
-};
+       __u8 Type;
+       __u8 StructureVersion;
+       __u8 NumberOfBoards;
+       struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS];
+} __attribute__((packed));
 
 struct CacheDescription {
-       __u8 Level __attribute__((packed));
-       __u32 TotalSize __attribute__((packed));
-       __u16 LineSize __attribute__((packed));
-       __u8  Associativity __attribute__((packed));
-       __u8  CacheType __attribute__((packed));
-       __u8  WriteType __attribute__((packed));
-       __u8  Number_CPUs_SharedBy __attribute__((packed));
-       __u8  Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS] __attribute__((packed));
+       __u8 Level;
+       __u32 TotalSize;
+       __u16 LineSize;
+       __u8  Associativity;
+       __u8  CacheType;
+       __u8  WriteType;
+       __u8  Number_CPUs_SharedBy;
+       __u8  Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS];
 
-};
+} __attribute__((packed));
 
 struct CPU_Description {
-       __u8 CPU_HardwareId __attribute__((packed));
-       char *FRU_String __attribute__((packed));
-       __u8 NumberOfCacheLevels __attribute__((packed));
-       struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS] __attribute__((packed));
-};
+       __u8 CPU_HardwareId;
+       char *FRU_String;
+       __u8 NumberOfCacheLevels;
+       struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS];
+} __attribute__((packed));
 
 struct CPU_Info {
-       __u8 Type __attribute__((packed));
-       __u8 StructureVersion __attribute__((packed));
-       __u8 NumberOf_CPUs __attribute__((packed));
-       struct CPU_Description CPU_Data[MAX_CPUS] __attribute__((packed));
-};
+       __u8 Type;
+       __u8 StructureVersion;
+       __u8 NumberOf_CPUs;
+       struct CPU_Description CPU_Data[MAX_CPUS];
+} __attribute__((packed));
 
 
 /*
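
    [The Voyager hunks above all make the same mechanical change: gcc 4.x warns that
    __attribute__((packed)) is ignored on byte-sized fields, and repeating it per member
    is error-prone since one missed multi-byte field silently reintroduces padding.
    Packing the whole definition once is equivalent and what was intended. Contrasting
    sketch with hypothetical names:

	/* old style: attribute repeated on every member */
	struct eeprom_rec_old {
		__u8  id __attribute__((packed));	/* gcc 4.x: already align 1, warns */
		__u16 checksum __attribute__((packed));
	};

	/* new style: pack the whole struct once; no padding between members */
	struct eeprom_rec_new {
		__u8  id;
		__u16 checksum;
	} __attribute__((packed));
    ]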
index 70ad1c949c2b498cf691cdfc6326877ecb284681..d06933bd631825cfbbb096b92ecd0a166649aedc 100644 (file)
@@ -166,38 +166,6 @@ static inline void _writel(unsigned long l, unsigned long addr)
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
 
-/**
- *     check_signature         -       find BIOS signatures
- *     @io_addr: mmio address to check
- *     @signature:  signature block
- *     @length: length of signature
- *
- *     Perform a signature comparison with the ISA mmio address io_addr.
- *     Returns 1 on a match.
- *
- *     This function is deprecated. New drivers should use ioremap and
- *     check_signature.
- */
-
-static inline int check_signature(void __iomem *io_addr,
-        const unsigned char *signature, int length)
-{
-        int retval = 0;
-#if 0
-printk("check_signature\n");
-        do {
-                if (readb(io_addr) != *signature)
-                        goto out;
-                io_addr++;
-                signature++;
-                length--;
-        } while (length);
-        retval = 1;
-out:
-#endif
-        return retval;
-}
-
 static inline void
 memset_io(volatile void __iomem *addr, unsigned char val, int count)
 {
index 6c8c17d047a1a28b9533595991a75d69c92076f3..d8f17a0d8c9f6f872c513b6d5850c98e76d01465 100644 (file)
@@ -4,6 +4,7 @@
 #ifndef __SUN3_MMU_H__
 #define __SUN3_MMU_H__
 
+#include <linux/types.h>
 #include <asm/movs.h>
 #include <asm/sun3-head.h>
 
@@ -160,7 +161,7 @@ static inline void sun3_put_context(unsigned char c)
        return;
 }
 
-extern void *sun3_ioremap(unsigned long phys, unsigned long size,
+extern void __iomem *sun3_ioremap(unsigned long phys, unsigned long size,
                          unsigned long type);
 
 extern int sun3_map_test(unsigned long addr, char *val);
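
    [sun3_ioremap() gains the __iomem address-space marker (the uaccess hunk below does
    the same with __user), so sparse can flag direct dereferences of mmio cookies. A
    small sketch, with the page-type constant assumed from the sun3 MMU header:

	void __iomem *regs = sun3_ioremap(phys, PAGE_SIZE, SUN3_PAGE_TYPE_IO);

	writeb(0x01, regs);	/* ok: accessors take __iomem pointers */
	/* *(u8 *)regs = 0x01;     sparse: cast removes address space */
    ]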
index 88b1f47400e17f8bfb4137e66c849373fef9fac2..e4c9f080ff20ad495254782cc80647bef4135aef 100644 (file)
@@ -76,7 +76,7 @@ asm volatile ("\n"                                    \
                break;                                                  \
        case 8:                                                         \
            {                                                           \
-               const void *__pu_ptr = (ptr);                           \
+               const void __user *__pu_ptr = (ptr);                    \
                asm volatile ("\n"                                      \
                        "1:     moves.l %2,(%1)+\n"                     \
                        "2:     moves.l %R2,(%1)\n"                     \
@@ -125,7 +125,7 @@ asm volatile ("\n"                                  \
                "       .previous"                              \
                : "+d" (res), "=&" #reg (__gu_val)              \
                : "m" (*(ptr)), "i" (err));                     \
-       (x) = (typeof(*(ptr)))(long)__gu_val;                   \
+       (x) = (typeof(*(ptr)))(unsigned long)__gu_val;          \
 })
 
 #define __get_user(x, ptr)                                             \
@@ -221,16 +221,16 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 
        switch (n) {
        case 1:
-               __get_user_asm(res, *(u8 *)to, (u8 *)from, u8, b, d, 1);
+               __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
                break;
        case 2:
-               __get_user_asm(res, *(u16 *)to, (u16 *)from, u16, w, d, 2);
+               __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
                break;
        case 3:
                __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
                break;
        case 4:
-               __get_user_asm(res, *(u32 *)to, (u32 *)from, u32, l, r, 4);
+               __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
                break;
        case 5:
                __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
@@ -302,16 +302,16 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 
        switch (n) {
        case 1:
-               __put_user_asm(res, *(u8 *)from, (u8 *)to, b, d, 1);
+               __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
                break;
        case 2:
-               __put_user_asm(res, *(u16 *)from, (u16 *)to, w, d, 2);
+               __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
                break;
        case 3:
                __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
                break;
        case 4:
-               __put_user_asm(res, *(u32 *)from, (u32 *)to, l, r, 4);
+               __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
                break;
        case 5:
                __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
index daafb5d43ef196c6fbc829d78ad0cdf508562c6e..ebaf03197114d0cd831a85cdddf3a11dec6db3c2 100644 (file)
 #define __NR_mq_notify         275
 #define __NR_mq_getsetattr     276
 #define __NR_waitid            277
-#define __NR_sys_setaltroot    278
+#define __NR_vserver           278
 #define __NR_add_key           279
 #define __NR_request_key       280
 #define __NR_keyctl            281
+#define __NR_ioprio_set                282
+#define __NR_ioprio_get                283
+#define __NR_inotify_init      284
+#define __NR_inotify_add_watch 285
+#define __NR_inotify_rm_watch  286
+#define __NR_migrate_pages     287
+#define __NR_openat            288
+#define __NR_mkdirat           289
+#define __NR_mknodat           290
+#define __NR_fchownat          291
+#define __NR_futimesat         292
+#define __NR_fstatat64         293
+#define __NR_unlinkat          294
+#define __NR_renameat          295
+#define __NR_linkat            296
+#define __NR_symlinkat         297
+#define __NR_readlinkat                298
+#define __NR_fchmodat          299
+#define __NR_faccessat         300
+#define __NR_pselect6          301
+#define __NR_ppoll             302
+#define __NR_unshare           303
+#define __NR_set_robust_list   304
+#define __NR_get_robust_list   305
+#define __NR_splice            306
+#define __NR_sync_file_range   307
+#define __NR_tee               308
+#define __NR_vmsplice          309
+#define __NR_move_pages                310
+
 #ifdef __KERNEL__
 
-#define NR_syscalls            282
+#define NR_syscalls            311
 #include <linux/err.h>
 
 /* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
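
    [Two fixes in one hunk: slot 278 is relabelled __NR_vserver, since the old
    __NR_sys_setaltroot name never corresponded to a wired-up syscall and 278 appears to
    be the vserver reservation other architectures carry; and the m68k table catches up
    with the ioprio/inotify/*at/splice families through __NR_move_pages, growing
    NR_syscalls from 282 to 311.]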
index df624e1ee6e2eb278931b6ecf56e3a5ea494dbcd..c2d124badbe566cfc08e1c76a754709ab1d6ec9a 100644 (file)
@@ -561,32 +561,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
  */
 #define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
 
-/*
- *     check_signature         -       find BIOS signatures
- *     @io_addr: mmio address to check
- *     @signature:  signature block
- *     @length: length of signature
- *
- *     Perform a signature comparison with the mmio address io_addr. This
- *     address should have been obtained by ioremap.
- *     Returns 1 on a match.
- */
-static inline int check_signature(char __iomem *io_addr,
-       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 /*
  * The caches on some architectures aren't dma-coherent and have need to
  * handle this in software.  There are three types of operations that
index 1a9804c65369dadad668fdca7b8faea56a00600d..0ce2a80b689e5da23a72ed6f17bfe01458325ed5 100644 (file)
@@ -24,8 +24,6 @@ static inline int irq_canonicalize(int irq)
 #define irq_canonicalize(irq) (irq)    /* Sane hardware, sane code ... */
 #endif
 
-struct pt_regs;
-
 extern asmlinkage unsigned int do_IRQ(unsigned int irq);
 
 #ifdef CONFIG_MIPS_MT_SMTC
index 158a4cd12e460a0dfb7a4a5ee56fc25ee28008f2..1fae5dc581381cd4de2b1022023a53c1acf6647a 100644 (file)
                .endm
 
 #ifdef CONFIG_SMP
-               .macro  get_saved_sp    /* SMP variation */
-#ifdef CONFIG_32BIT
 #ifdef CONFIG_MIPS_MT_SMTC
-               .set    mips32
-               mfc0    k0, CP0_TCBIND;
-               .set    mips0
-               lui     k1, %hi(kernelsp)
-               srl     k0, k0, 19
-               /* No need to shift down and up to clear bits 0-1 */
+#define PTEBASE_SHIFT  19      /* TCBIND */
 #else
-               mfc0    k0, CP0_CONTEXT
-               lui     k1, %hi(kernelsp)
-               srl     k0, k0, 23
-#endif
-               addu    k1, k0
-               LONG_L  k1, %lo(kernelsp)(k1)
+#define PTEBASE_SHIFT  23      /* CONTEXT */
 #endif
-#ifdef CONFIG_64BIT
+               .macro  get_saved_sp    /* SMP variation */
 #ifdef CONFIG_MIPS_MT_SMTC
-               .set    mips64
-               mfc0    k0, CP0_TCBIND;
-               .set    mips0
-               lui     k0, %highest(kernelsp)
-               dsrl    k1, 19
-               /* No need to shift down and up to clear bits 0-2 */
+               mfc0    k0, CP0_TCBIND
 #else
-               MFC0    k1, CP0_CONTEXT
-               lui     k0, %highest(kernelsp)
-               dsrl    k1, 23
-               daddiu  k0, %higher(kernelsp)
-               dsll    k0, k0, 16
-               daddiu  k0, %hi(kernelsp)
-               dsll    k0, k0, 16
-#endif /* CONFIG_MIPS_MT_SMTC */
-               daddu   k1, k1, k0
+               MFC0    k0, CP0_CONTEXT
+#endif
+#if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4)
+               lui     k1, %highest(kernelsp)
+               daddiu  k1, %higher(kernelsp)
+               dsll    k1, 16
+               daddiu  k1, %hi(kernelsp)
+               dsll    k1, 16
+#else
+               lui     k1, %hi(kernelsp)
+#endif
+               LONG_SRL        k0, PTEBASE_SHIFT
+               LONG_ADDU       k1, k0
                LONG_L  k1, %lo(kernelsp)(k1)
-#endif /* CONFIG_64BIT */
                .endm
 
                .macro  set_saved_sp stackp temp temp2
-#ifdef CONFIG_32BIT
-#ifdef CONFIG_MIPS_MT_SMTC
-               mfc0    \temp, CP0_TCBIND
-               srl     \temp, 19
-#else
-               mfc0    \temp, CP0_CONTEXT
-               srl     \temp, 23
-#endif
-#endif
-#ifdef CONFIG_64BIT
 #ifdef CONFIG_MIPS_MT_SMTC
                mfc0    \temp, CP0_TCBIND
-               dsrl    \temp, 19
 #else
                MFC0    \temp, CP0_CONTEXT
-               dsrl    \temp, 23
-#endif
 #endif
+               LONG_SRL        \temp, PTEBASE_SHIFT
                LONG_S  \stackp, kernelsp(\temp)
                .endm
 #else
                .macro  get_saved_sp    /* Uniprocessor variation */
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4)
                lui     k1, %highest(kernelsp)
                daddiu  k1, %higher(kernelsp)
                dsll    k1, k1, 16
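
    [The rewrite collapses the duplicated 32-bit and 64-bit paths by factoring the CP0
    field position into PTEBASE_SHIFT and leaning on the width-agnostic LONG_* assembler
    macros. A sketch of how those macros are expected to expand, roughly as defined in
    <asm/asm.h>:

	#ifdef CONFIG_32BIT
	#define LONG_L		lw
	#define LONG_S		sw
	#define LONG_SRL	srl
	#define LONG_ADDU	addu
	#else
	#define LONG_L		ld
	#define LONG_S		sd
	#define LONG_SRL	dsrl
	#define LONG_ADDU	daddu
	#endif
    ]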
index fa6d04dac56bfc958273bbdde28a9e0bd4d28072..b62ec7c521cc3807793aa552e41fc0a20591c6ba 100644 (file)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1995, 1996, 1999, 2001 Ralf Baechle
+ * Copyright (C) 1995, 96, 99, 2001, 06 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2001 MIPS Technologies, Inc.
  */
 #include <linux/posix_types.h>
 
 typedef unsigned char cc_t;
-#if (_MIPS_SZLONG == 32)
-typedef unsigned long speed_t;
-typedef unsigned long tcflag_t;
-#endif
-#if (_MIPS_SZLONG == 64)
-typedef __u32 speed_t;
-typedef __u32 tcflag_t;
-#endif
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
 
 /*
  * The ABI says nothing about NCC but seems to use NCCS as
index cbbd8c648df1341120e825b29c9d33a9763d234b..3baff8b0fd5add68cb7dfde5e1324ac182a7801e 100644 (file)
@@ -404,32 +404,6 @@ static inline void __out_be64(volatile unsigned long __iomem *addr, unsigned lon
 
 #include <asm/eeh.h>
 
-/**
- *     check_signature         -       find BIOS signatures
- *     @io_addr: mmio address to check
- *     @signature:  signature block
- *     @length: length of signature
- *
- *     Perform a signature comparison with the mmio address io_addr. This
- *     address should have been obtained by ioremap.
- *     Returns 1 on a match.
- */
-static inline int check_signature(const volatile void __iomem * io_addr,
-       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 /* Nothing to do */
 
 #define dma_cache_inv(_start,_size)            do { } while (0)
index 3d9a9e6f33217b10c63e5dacf4171703e1261562..a4c411b753efe945416a3b6e12d9725b8eb35c42 100644 (file)
@@ -439,22 +439,6 @@ extern inline void * phys_to_virt(unsigned long address)
 #define iobarrier_r()  eieio()
 #define iobarrier_w()  eieio()
 
-static inline int check_signature(volatile void __iomem * io_addr,
-       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 /*
  * Here comes the ppc implementation of the IOMAP 
  * interfaces.
index da063cd5f0a006cbb5568d5d0261cbd931cdc446..81287d86329d0be4e44e491b0e059b2983422765 100644 (file)
@@ -275,6 +275,12 @@ struct ccw_dev_id {
        u16 devno;
 };
 
+static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
+                                     struct ccw_dev_id *dev_id2)
+{
+       return !memcmp(dev_id1, dev_id2, sizeof(struct ccw_dev_id));
+}
+
 extern int diag210(struct diag210 *addr);
 
 extern void wait_cons_dev(void);
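
    [ccw_dev_id_is_equal() compares the two IDs bytewise. Usage sketch, with the caveat
    that memcmp() equality over a struct also compares any padding bytes, so callers
    should only hand it IDs produced by the same zero-initialised paths:

	struct ccw_dev_id a = { .devno = 0x1234 };
	struct ccw_dev_id b = a;	/* struct copy carries padding too */

	if (ccw_dev_id_is_equal(&a, &b))
		/* same subchannel set and device number */;
    ]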
index fcd6c256a2d194aa4b1269f89cfc25fabb8c96a6..30e5cbe570f2db3d29f73e282a6ef10bd57b4063 100644 (file)
@@ -26,7 +26,7 @@ struct vtimer_list {
        spinlock_t lock;
        unsigned long magic;
 
-       void (*function)(unsigned long, struct pt_regs*);
+       void (*function)(unsigned long);
        unsigned long data;
 };
 
index 3d0943167659104ba5f8149c4dfbfa014b8805d5..c86e1705093570e0ec66d5e405b9eff29e3a7b11 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 1999 Niibe Yutaka
  * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
 #ifndef __ASM_CPU_SH4_UBC_H
 #define __ASM_CPU_SH4_UBC_H
 
+#if defined(CONFIG_CPU_SH4A)
+#define UBC_CBR0               0xff200000
+#define UBC_CRR0               0xff200004
+#define UBC_CAR0               0xff200008
+#define UBC_CAMR0              0xff20000c
+#define UBC_CBR1               0xff200020
+#define UBC_CRR1               0xff200024
+#define UBC_CAR1               0xff200028
+#define UBC_CAMR1              0xff20002c
+#define UBC_CDR1               0xff200030
+#define UBC_CDMR1              0xff200034
+#define UBC_CETR1              0xff200038
+#define UBC_CCMFR              0xff200600
+#define UBC_CBCR               0xff200620
+
+/* CBR */
+#define UBC_CBR_AIE            (0x01<<30)
+#define UBC_CBR_ID_INST                (0x01<<4)
+#define UBC_CBR_RW_READ                (0x01<<1)
+#define UBC_CBR_CE             (0x01)
+
+#define        UBC_CBR_AIV_MASK        (0x00FF0000)
+#define        UBC_CBR_AIV_SHIFT       (16)
+#define UBC_CBR_AIV_SET(asid)  (((asid)<<UBC_CBR_AIV_SHIFT) & UBC_CBR_AIV_MASK)
+
+#define UBC_CBR_INIT           0x20000000
+
+/* CRR */
+#define UBC_CRR_RES            (0x01<<13)
+#define UBC_CRR_PCB            (0x01<<1)
+#define UBC_CRR_BIE            (0x01)
+
+#define UBC_CRR_INIT           0x00002000
+
+#else  /* CONFIG_CPU_SH4 */
 #define UBC_BARA               0xff200000
 #define UBC_BAMRA              0xff200004
 #define UBC_BBRA               0xff200008
@@ -22,6 +58,7 @@
 #define UBC_BDRB               0xff200018
 #define UBC_BDMRB              0xff20001c
 #define UBC_BRCR               0xff200020
+#endif /* CONFIG_CPU_SH4 */
 
 #endif /* __ASM_CPU_SH4_UBC_H */
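
    [SH4A replaces the classic BARA/BBRA-style break registers with per-channel
    CBR/CRR/CAR/CAMR sets, hence the parallel register map above. A hedged sketch of
    arming channel 0 as an instruction-fetch breakpoint; the sequence is illustrative,
    not lifted from the actual debug code:

	ctrl_outl(addr, UBC_CAR0);			/* compare address */
	ctrl_outl(0x0, UBC_CAMR0);			/* no address mask */
	ctrl_outl(UBC_CBR_ID_INST | UBC_CBR_CE, UBC_CBR0);
	ctrl_outl(UBC_CRR_RES | UBC_CRR_BIE, UBC_CRR0);	/* break enable */
    ]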
 
index fed26616967a03979d0d6ea6e7c14d1d6a446f97..80ee1cda7498f2383cd72588a800c66ad68178e3 100644 (file)
@@ -1,4 +1,8 @@
 #ifndef __ASM_SH_HW_IRQ_H
 #define __ASM_SH_HW_IRQ_H
 
+#include <asm/atomic.h>
+
+extern atomic_t irq_err_count;
+
 #endif /* __ASM_SH_HW_IRQ_H */
index ed12d38e8c0082dda8db85613bf578f4819e3ef9..a0e55b09e4fd773b30d9acf30e2e89ed60f72340 100644 (file)
@@ -304,22 +304,6 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 #define iounmap(addr)                                  \
        __iounmap((addr))
 
-static inline int check_signature(char __iomem *io_addr,
-                       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 /*
  * The caches on some architectures aren't dma-coherent and have need to
  * handle this in software.  There are three types of operations that
index 0e5f365aff70a63e2f2e639101e5b46e26708ce7..28996f9c58ccf81ff1c74d7b3e232869cd564c00 100644 (file)
@@ -697,13 +697,15 @@ extern int ipr_irq_demux(int irq);
 
 #define INTC2_INTPRI_OFFSET    0x00
 
-void make_intc2_irq(unsigned int irq,
-                   unsigned int ipr_offset, unsigned int ipr_shift,
-                   unsigned int msk_offset, unsigned int msk_shift,
-                   unsigned int priority);
+struct intc2_data {
+       unsigned short irq;
+       unsigned char ipr_offset, ipr_shift;
+       unsigned char msk_offset, msk_shift;
+       unsigned char priority;
+};
+
+void make_intc2_irq(struct intc2_data *);
 void init_IRQ_intc2(void);
-void intc2_add_clear_irq(int irq, int (*fn)(int));
-
 #endif
 
 extern int shmse_irq_demux(int irq);
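
    [make_intc2_irq() goes from six scalar arguments to a struct intc2_data descriptor,
    so a board can keep its whole INTC2 layout in one table. A minimal sketch with
    hypothetical board values:

	static struct intc2_data intc2_irq_table[] = {
		/* irq, ipr_off, ipr_shift, msk_off, msk_shift, prio */
		{   64,    0x00,         0,    0x40,         0,   13 },
		{   65,    0x00,         4,    0x40,         1,   13 },
	};

	static void __init board_init_irq(void)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(intc2_irq_table); i++)
			make_intc2_irq(&intc2_irq_table[i]);
	}
    ]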
diff --git a/include/asm-sh/irq_regs.h b/include/asm-sh/irq_regs.h
new file mode 100644 (file)
index 0000000..3dd9c0b
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
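
    [This one-line header is part of the tree-wide removal of the pt_regs argument from
    interrupt paths (see the smp_vic_timer_interrupt and handle_timer_tick signature
    changes elsewhere in this merge): the registers are parked per-CPU on irq entry and
    fetched on demand. The asm-generic implementation is roughly:

	DECLARE_PER_CPU(struct pt_regs *, __irq_regs);

	static inline struct pt_regs *get_irq_regs(void)
	{
		return __get_cpu_var(__irq_regs);
	}

	static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
	{
		struct pt_regs *old_regs = __get_cpu_var(__irq_regs);

		__get_cpu_var(__irq_regs) = new_regs;
		return old_regs;	/* caller restores on the way out */
	}
    ]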
index c7ab28095ba0ea4ffabe410108d57a33c9f5e56b..5df842bcf7b63d554ef6486b11e07a663514c373 100644 (file)
@@ -8,8 +8,9 @@ struct sys_timer_ops {
        int (*init)(void);
        int (*start)(void);
        int (*stop)(void);
+#ifndef CONFIG_GENERIC_TIME
        unsigned long (*get_offset)(void);
-       unsigned long (*get_frequency)(void);
+#endif
 };
 
 struct sys_timer {
@@ -24,21 +25,17 @@ struct sys_timer {
 extern struct sys_timer tmu_timer;
 extern struct sys_timer *sys_timer;
 
+#ifndef CONFIG_GENERIC_TIME
 static inline unsigned long get_timer_offset(void)
 {
        return sys_timer->ops->get_offset();
 }
-
-static inline unsigned long get_timer_frequency(void)
-{
-       return sys_timer->ops->get_frequency();
-}
+#endif
 
 /* arch/sh/kernel/timers/timer.c */
 struct sys_timer *get_sys_timer(void);
 
 /* arch/sh/kernel/time.c */
-void handle_timer_tick(struct pt_regs *);
+void handle_timer_tick(void);
 
 #endif /* __ASM_SH_TIMER_H */
-
index 252fedbb6621e4cbd5df0f815d51ef7b4414a636..14d8e7b4bf4b369254d49a0f458ec7c8b8a17c59 100644 (file)
@@ -178,22 +178,6 @@ extern void iounmap(void *addr);
 unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
 extern void onchip_unmap(unsigned long vaddr);
 
-static __inline__ int check_signature(volatile void __iomem *io_addr,
-                       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 /*
  * The caches on some architectures aren't dma-coherent and have need to
  * handle this in software.  There are three types of operations that
index 0056770e83ada176fc091dba50be956dcaf7841f..30b912d8e8bc4a0ed0a0ad6bf6e1221820950197 100644 (file)
@@ -440,21 +440,6 @@ _memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
 
 #define memcpy_toio(d,s,sz)    _memcpy_toio(d,s,sz)
 
-static inline int check_signature(void __iomem *io_addr,
-                                 const unsigned char *signature,
-                                 int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature++)
-                       goto out;
-               io_addr++;
-       } while (--length);
-       retval = 1;
-out:
-       return retval;
-}
-
 #define mmiowb()
 
 #ifdef __KERNEL__
index 70e91fe7634485708815dda0c851c2f066064a1c..6ee9fadaaacb29a3a7cba086c37f4f7bd94e6eaf 100644 (file)
@@ -254,33 +254,6 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
 
 #define eth_io_copy_and_sum(a,b,c,d)           eth_copy_and_sum((a),(void *)(b),(c),(d))
 
-/**
- *     check_signature         -       find BIOS signatures
- *     @io_addr: mmio address to check 
- *     @signature:  signature block
- *     @length: length of signature
- *
- *     Perform a signature comparison with the mmio address io_addr. This
- *     address should have been obtained by ioremap.
- *     Returns 1 on a match.
- */
-static inline int check_signature(void __iomem *io_addr,
-       const unsigned char *signature, int length)
-{
-       int retval = 0;
-       do {
-               if (readb(io_addr) != *signature)
-                       goto out;
-               io_addr++;
-               signature++;
-               length--;
-       } while (length);
-       retval = 1;
-out:
-       return retval;
-}
-
 /* Nothing to do */
 
 #define dma_cache_inv(_start,_size)            do { } while (0)
index de9c3147ee4c3dddb639210dc17a9b74b406a040..cef17e0f828cc5d0080cf967d11e80a228f4d18b 100644 (file)
@@ -475,6 +475,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
                : :"a" (eax), "c" (ecx));
 }
 
+extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
+
 #define stack_current() \
 ({                                                             \
        struct thread_info *ti;                                 \
index 88b5dfd8ee125be2c3a025c7ea1e03183eaccbe4..2b0c955590fec1dacf875c4e0dd21d4bc0269c5e 100644 (file)
@@ -494,6 +494,9 @@ void acpi_pci_unregister_driver(struct acpi_pci_driver *driver);
 
 extern int ec_read(u8 addr, u8 *val);
 extern int ec_write(u8 addr, u8 val);
+extern int ec_transaction(u8 command,
+                          const u8 *wdata, unsigned wdata_len,
+                          u8 *rdata, unsigned rdata_len);
 
 #endif /*CONFIG_ACPI_EC*/
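
    [ec_transaction() generalises ec_read()/ec_write() to arbitrary embedded-controller
    command/data exchanges. A hedged sketch reading one EC register through it, assuming
    the standard ACPI "read EC" command byte 0x80 and a hypothetical register address:

	u8 addr = 0x50;		/* hypothetical EC register */
	u8 val;
	int err = ec_transaction(0x80, &addr, 1, &val, 1);

	if (!err)
		/* val now holds the register contents */;
    ]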
 
index dcc5de7cc487673f786f25c2598091549121d351..64b4641904fee0415c169d3f20e3bd4cdba71f5c 100644 (file)
@@ -46,7 +46,8 @@
  * bitmap_remap(dst, src, old, new, nbits)     *dst = map(old, new)(src)
  * bitmap_bitremap(oldbit, old, new, nbits)    newbit = map(old, new)(oldbit)
  * bitmap_scnprintf(buf, len, src, nbits)      Print bitmap src to buf
- * bitmap_parse(ubuf, ulen, dst, nbits)                Parse bitmap dst from user buf
+ * bitmap_parse(buf, buflen, dst, nbits)       Parse bitmap dst from kernel buf
+ * bitmap_parse_user(ubuf, ulen, dst, nbits)   Parse bitmap dst from user buf
  * bitmap_scnlistprintf(buf, len, src, nbits)  Print bitmap src as list to buf
  * bitmap_parselist(buf, dst, nbits)           Parse bitmap dst from list
  * bitmap_find_free_region(bitmap, bits, order)        Find and allocate bit region
@@ -106,7 +107,9 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
 
 extern int bitmap_scnprintf(char *buf, unsigned int len,
                        const unsigned long *src, int nbits);
-extern int bitmap_parse(const char __user *ubuf, unsigned int ulen,
+extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
+                       unsigned long *dst, int nbits);
+extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
                        unsigned long *dst, int nbits);
 extern int bitmap_scnlistprintf(char *buf, unsigned int len,
                        const unsigned long *src, int nbits);
@@ -270,6 +273,12 @@ static inline void bitmap_shift_left(unsigned long *dst,
                __bitmap_shift_left(dst, src, n, nbits);
 }
 
+static inline int bitmap_parse(const char *buf, unsigned int buflen,
+                       unsigned long *maskp, int nmaskbits)
+{
+       return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __LINUX_BITMAP_H */
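
    [bitmap_parse() keeps its name but now takes a kernel buffer, with the new
    bitmap_parse_user() covering the old user-pointer case; both funnel into
    __bitmap_parse(). Sketch of the kernel-buffer variant (comma-separated hex chunks,
    most significant first):

	DECLARE_BITMAP(mask, 64);
	const char *s = "f0f0";

	if (bitmap_parse(s, strlen(s), mask, 64) == 0)
		/* bits 4-7 and 12-15 are now set */;
    ]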
index 26f7856ff8123e4b249ca985505fcd80d7d367d3..d370d2cfe13803ed79626e90194e270cc0c6c169 100644 (file)
@@ -157,6 +157,7 @@ enum rq_cmd_type_bits {
        REQ_TYPE_ATA_CMD,
        REQ_TYPE_ATA_TASK,
        REQ_TYPE_ATA_TASKFILE,
+       REQ_TYPE_ATA_PC,
 };
 
 /*
index 131ffd37e716fb7ac4a386eaea9ff58c26bf639d..5d9fb0e94156235eca46e199352933fc638b9596 100644 (file)
@@ -69,6 +69,8 @@ struct buffer_head {
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
+       struct address_space *b_assoc_map;      /* mapping this buffer is
+                                                  associated with */
        atomic_t b_count;               /* users using this buffer_head */
 };
 
diff --git a/include/linux/carta_random32.h b/include/linux/carta_random32.h
new file mode 100644 (file)
index 0000000..f6f3bd9
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Fast, simple, yet decent quality random number generator based on
+ * a paper by David G. Carta ("Two Fast Implementations of the
+ * `Minimal Standard' Random Number Generator," Communications of the
+ * ACM, January, 1990).
+ *
+ * Copyright (c) 2002-2006 Hewlett-Packard Development Company, L.P.
+ *     Contributed by Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307 USA
+ */
+#ifndef _LINUX_CARTA_RANDOM32_H_
+#define _LINUX_CARTA_RANDOM32_H_
+
+u64 carta_random32(u64 seed);
+
+#endif /* _LINUX_CARTA_RANDOM32_H_ */
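
    [Carta's trick computes seed' = 16807 * seed mod (2^31 - 1) without a division by
    splitting the 46-bit product at bit 31: since 2^31 = 1 (mod 2^31 - 1), the high and
    low halves can simply be added. A sketch consistent with the header above; the
    mainline lib/ version may differ in detail:

	u64 carta_random32(u64 seed)
	{
		u64 prod = 16807ULL * seed;	/* fits in 46 bits */
		u64 lo = prod & 0x7fffffffULL;	/* low 31 bits */
		u64 hi = prod >> 31;		/* high part */
		u64 s = lo + hi;		/* < 2*(2^31 - 1) */

		if (s >= 0x7fffffffULL)
			s -= 0x7fffffffULL;	/* one reduction suffices */
		return s;
	}
    ]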
index 4e1663d7691e4a44c676a446d2c94374963656b3..cfdb4f6a89d4a0edd2aa5ec65cf88ec88787eb13 100644 (file)
@@ -61,17 +61,23 @@ COMPATIBLE_IOCTL(FIGETBSZ)
  *         Some need translations, these do not.
  */
 COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
-COMPATIBLE_IOCTL(HDIO_SET_DMA)
-COMPATIBLE_IOCTL(HDIO_SET_UNMASKINTR)
-COMPATIBLE_IOCTL(HDIO_SET_NOWERR)
-COMPATIBLE_IOCTL(HDIO_SET_32BIT)
-COMPATIBLE_IOCTL(HDIO_SET_MULTCOUNT)
-COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
 COMPATIBLE_IOCTL(HDIO_DRIVE_TASK)
-COMPATIBLE_IOCTL(HDIO_SET_PIO_MODE)
-COMPATIBLE_IOCTL(HDIO_SET_NICE)
-COMPATIBLE_IOCTL(HDIO_SET_KEEPSETTINGS)
+COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
+ULONG_IOCTL(HDIO_SET_MULTCOUNT)
+ULONG_IOCTL(HDIO_SET_UNMASKINTR)
+ULONG_IOCTL(HDIO_SET_KEEPSETTINGS)
+ULONG_IOCTL(HDIO_SET_32BIT)
+ULONG_IOCTL(HDIO_SET_NOWERR)
+ULONG_IOCTL(HDIO_SET_DMA)
+ULONG_IOCTL(HDIO_SET_PIO_MODE)
+ULONG_IOCTL(HDIO_SET_NICE)
+ULONG_IOCTL(HDIO_SET_WCACHE)
+ULONG_IOCTL(HDIO_SET_ACOUSTIC)
+ULONG_IOCTL(HDIO_SET_BUSSTATE)
+ULONG_IOCTL(HDIO_SET_ADDRESS)
 COMPATIBLE_IOCTL(HDIO_SCAN_HWIF)
+/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
+COMPATIBLE_IOCTL(0x330)
 /* 0x02 -- Floppy ioctls */
 COMPATIBLE_IOCTL(FDMSGON)
 COMPATIBLE_IOCTL(FDMSGOFF)
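
    [The HDIO_SET_* calls pass their value in the ioctl argument itself rather than
    through a pointer, so on a 64-bit kernel the 32-bit argument needs explicit
    zero-extension; that is what switching them from COMPATIBLE_IOCTL to ULONG_IOCTL
    does. The userspace distinction, sketched (fd and buf hypothetical):

	ioctl(fd, HDIO_SET_DMA, 1L);		/* arg is the value: needs ULONG_IOCTL */
	ioctl(fd, HDIO_GET_IDENTITY, buf);	/* arg is a pointer: COMPATIBLE_IOCTL */
    ]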
index b268a3c0c37628d23231754f1ad9047ac47a74b8..d0e8c8b0e34dee03a78823c81d2c7536ce88e2cc 100644 (file)
@@ -8,8 +8,8 @@
  * See detailed comments in the file linux/bitmap.h describing the
  * data type on which these cpumasks are based.
  *
- * For details of cpumask_scnprintf() and cpumask_parse(),
- * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ * For details of cpumask_scnprintf() and cpumask_parse_user(),
+ * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
  * For details of cpulist_scnprintf() and cpulist_parse(), see
  * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
  * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
@@ -49,7 +49,7 @@
  * unsigned long *cpus_addr(mask)      Array of unsigned long's in mask
  *
  * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
- * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask
+ * int cpumask_parse_user(ubuf, ulen, mask)    Parse ascii string as cpumask
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
  * int cpulist_parse(buf, map)         Parse ascii string as cpulist
  * int cpu_remap(oldbit, old, new)     newbit = map(old, new)(oldbit)
@@ -273,12 +273,12 @@ static inline int __cpumask_scnprintf(char *buf, int len,
        return bitmap_scnprintf(buf, len, srcp->bits, nbits);
 }
 
-#define cpumask_parse(ubuf, ulen, dst) \
-                       __cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
-static inline int __cpumask_parse(const char __user *buf, int len,
+#define cpumask_parse_user(ubuf, ulen, dst) \
+                       __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS)
+static inline int __cpumask_parse_user(const char __user *buf, int len,
                                        cpumask_t *dstp, int nbits)
 {
-       return bitmap_parse(buf, len, dstp->bits, nbits);
+       return bitmap_parse_user(buf, len, dstp->bits, nbits);
 }
 
 #define cpulist_scnprintf(buf, len, src) \
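
    [The cpumask wrapper follows the bitmap split, making the user-pointer nature
    explicit in the name. Sketch of a typical write handler, assuming a hypothetical
    proc file that accepts a cpu mask from userspace:

	static ssize_t cpus_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
	{
		cpumask_t new_mask;
		int err = cpumask_parse_user(buf, count, new_mask);

		if (err)
			return err;
		/* ... apply new_mask ... */
		return count;
	}
    ]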
index 44605be5940902caac298dc796d0d8376ff89686..63f64a9a5bf7b207b674be433d1c3b218ff39813 100644 (file)
@@ -230,6 +230,7 @@ extern struct dentry * d_alloc_anon(struct inode *);
 extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
 extern void shrink_dcache_sb(struct super_block *);
 extern void shrink_dcache_parent(struct dentry *);
+extern void shrink_dcache_for_umount(struct super_block *);
 extern int d_invalidate(struct dentry *);
 
 /* only used at mount-time */
index b3370ef5164d0589d3300e91162c0b5599045de1..2fa9f1144228e7270ab626b2daf887ff0eb1b1b9 100644 (file)
@@ -70,7 +70,6 @@ struct elevator_type
 {
        struct list_head list;
        struct elevator_ops ops;
-       struct elevator_type *elevator_type;
        struct elv_fs_entry *elevator_attrs;
        char elevator_name[ELV_NAME_MAX];
        struct module *elevator_owner;
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
new file mode 100644 (file)
index 0000000..498503e
--- /dev/null
@@ -0,0 +1,994 @@
+/*
+ *  linux/include/linux/ext4_fs.h
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/include/linux/minix_fs.h
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#ifndef _LINUX_EXT4_FS_H
+#define _LINUX_EXT4_FS_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/magic.h>
+
+/*
+ * The second extended filesystem constants/structures
+ */
+
+/*
+ * Define EXT4FS_DEBUG to produce debug messages
+ */
+#undef EXT4FS_DEBUG
+
+/*
+ * Define EXT4_RESERVATION to reserve data blocks for expanding files
+ */
+#define EXT4_DEFAULT_RESERVE_BLOCKS     8
+/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
+#define EXT4_MAX_RESERVE_BLOCKS         1027
+#define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
+/*
+ * Always enable hashed directories
+ */
+#define CONFIG_EXT4_INDEX
+
+/*
+ * Debug code
+ */
+#ifdef EXT4FS_DEBUG
+#define ext4_debug(f, a...)                                            \
+       do {                                                            \
+               printk (KERN_DEBUG "EXT4-fs DEBUG (%s, %d): %s:",       \
+                       __FILE__, __LINE__, __FUNCTION__);              \
+               printk (KERN_DEBUG f, ## a);                            \
+       } while (0)
+#else
+#define ext4_debug(f, a...)    do {} while (0)
+#endif
+
+/*
+ * Special inodes numbers
+ */
+#define        EXT4_BAD_INO             1      /* Bad blocks inode */
+#define EXT4_ROOT_INO           2      /* Root inode */
+#define EXT4_BOOT_LOADER_INO    5      /* Boot loader inode */
+#define EXT4_UNDEL_DIR_INO      6      /* Undelete directory inode */
+#define EXT4_RESIZE_INO                 7      /* Reserved group descriptors inode */
+#define EXT4_JOURNAL_INO        8      /* Journal inode */
+
+/* First non-reserved inode for old ext4 filesystems */
+#define EXT4_GOOD_OLD_FIRST_INO        11
+
+/*
+ * Maximal count of links to a file
+ */
+#define EXT4_LINK_MAX          32000
+
+/*
+ * Macro-instructions used to manage several block sizes
+ */
+#define EXT4_MIN_BLOCK_SIZE            1024
+#define        EXT4_MAX_BLOCK_SIZE             4096
+#define EXT4_MIN_BLOCK_LOG_SIZE                  10
+#ifdef __KERNEL__
+# define EXT4_BLOCK_SIZE(s)            ((s)->s_blocksize)
+#else
+# define EXT4_BLOCK_SIZE(s)            (EXT4_MIN_BLOCK_SIZE << (s)->s_log_block_size)
+#endif
+#define        EXT4_ADDR_PER_BLOCK(s)          (EXT4_BLOCK_SIZE(s) / sizeof (__u32))
+#ifdef __KERNEL__
+# define EXT4_BLOCK_SIZE_BITS(s)       ((s)->s_blocksize_bits)
+#else
+# define EXT4_BLOCK_SIZE_BITS(s)       ((s)->s_log_block_size + 10)
+#endif
+#ifdef __KERNEL__
+#define        EXT4_ADDR_PER_BLOCK_BITS(s)     (EXT4_SB(s)->s_addr_per_block_bits)
+#define EXT4_INODE_SIZE(s)             (EXT4_SB(s)->s_inode_size)
+#define EXT4_FIRST_INO(s)              (EXT4_SB(s)->s_first_ino)
+#else
+#define EXT4_INODE_SIZE(s)     (((s)->s_rev_level == EXT4_GOOD_OLD_REV) ? \
+                                EXT4_GOOD_OLD_INODE_SIZE : \
+                                (s)->s_inode_size)
+#define EXT4_FIRST_INO(s)      (((s)->s_rev_level == EXT4_GOOD_OLD_REV) ? \
+                                EXT4_GOOD_OLD_FIRST_INO : \
+                                (s)->s_first_ino)
+#endif
+
+/*
+ * Macro-instructions used to manage fragments
+ */
+#define EXT4_MIN_FRAG_SIZE             1024
+#define        EXT4_MAX_FRAG_SIZE              4096
+#define EXT4_MIN_FRAG_LOG_SIZE           10
+#ifdef __KERNEL__
+# define EXT4_FRAG_SIZE(s)             (EXT4_SB(s)->s_frag_size)
+# define EXT4_FRAGS_PER_BLOCK(s)       (EXT4_SB(s)->s_frags_per_block)
+#else
+# define EXT4_FRAG_SIZE(s)             (EXT4_MIN_FRAG_SIZE << (s)->s_log_frag_size)
+# define EXT4_FRAGS_PER_BLOCK(s)       (EXT4_BLOCK_SIZE(s) / EXT4_FRAG_SIZE(s))
+#endif
+
+/*
+ * Structure of a blocks group descriptor
+ */
+struct ext4_group_desc
+{
+       __le32  bg_block_bitmap;                /* Blocks bitmap block */
+       __le32  bg_inode_bitmap;                /* Inodes bitmap block */
+       __le32  bg_inode_table;         /* Inodes table block */
+       __le16  bg_free_blocks_count;   /* Free blocks count */
+       __le16  bg_free_inodes_count;   /* Free inodes count */
+       __le16  bg_used_dirs_count;     /* Directories count */
+       __u16   bg_flags;
+       __u32   bg_reserved[3];
+       __le32  bg_block_bitmap_hi;     /* Blocks bitmap block MSB */
+       __le32  bg_inode_bitmap_hi;     /* Inodes bitmap block MSB */
+       __le32  bg_inode_table_hi;      /* Inodes table block MSB */
+};
+
+#ifdef __KERNEL__
+#include <linux/ext4_fs_i.h>
+#include <linux/ext4_fs_sb.h>
+#endif
+/*
+ * Macro-instructions used to manage group descriptors
+ */
+#define EXT4_MIN_DESC_SIZE             32
+#define EXT4_MIN_DESC_SIZE_64BIT       64
+#define        EXT4_MAX_DESC_SIZE              EXT4_MIN_BLOCK_SIZE
+#define EXT4_DESC_SIZE(s)              (EXT4_SB(s)->s_desc_size)
+#ifdef __KERNEL__
+# define EXT4_BLOCKS_PER_GROUP(s)      (EXT4_SB(s)->s_blocks_per_group)
+# define EXT4_DESC_PER_BLOCK(s)                (EXT4_SB(s)->s_desc_per_block)
+# define EXT4_INODES_PER_GROUP(s)      (EXT4_SB(s)->s_inodes_per_group)
+# define EXT4_DESC_PER_BLOCK_BITS(s)   (EXT4_SB(s)->s_desc_per_block_bits)
+#else
+# define EXT4_BLOCKS_PER_GROUP(s)      ((s)->s_blocks_per_group)
+# define EXT4_DESC_PER_BLOCK(s)                (EXT4_BLOCK_SIZE(s) / EXT4_DESC_SIZE(s))
+# define EXT4_INODES_PER_GROUP(s)      ((s)->s_inodes_per_group)
+#endif
+
+/*
+ * Constants relative to the data blocks
+ */
+#define        EXT4_NDIR_BLOCKS                12
+#define        EXT4_IND_BLOCK                  EXT4_NDIR_BLOCKS
+#define        EXT4_DIND_BLOCK                 (EXT4_IND_BLOCK + 1)
+#define        EXT4_TIND_BLOCK                 (EXT4_DIND_BLOCK + 1)
+#define        EXT4_N_BLOCKS                   (EXT4_TIND_BLOCK + 1)
+
+/*
+ * Inode flags
+ */
+#define        EXT4_SECRM_FL                   0x00000001 /* Secure deletion */
+#define        EXT4_UNRM_FL                    0x00000002 /* Undelete */
+#define        EXT4_COMPR_FL                   0x00000004 /* Compress file */
+#define EXT4_SYNC_FL                   0x00000008 /* Synchronous updates */
+#define EXT4_IMMUTABLE_FL              0x00000010 /* Immutable file */
+#define EXT4_APPEND_FL                 0x00000020 /* writes to file may only append */
+#define EXT4_NODUMP_FL                 0x00000040 /* do not dump file */
+#define EXT4_NOATIME_FL                        0x00000080 /* do not update atime */
+/* Reserved for compression usage... */
+#define EXT4_DIRTY_FL                  0x00000100
+#define EXT4_COMPRBLK_FL               0x00000200 /* One or more compressed clusters */
+#define EXT4_NOCOMPR_FL                        0x00000400 /* Don't compress */
+#define EXT4_ECOMPR_FL                 0x00000800 /* Compression error */
+/* End compression flags --- maybe not all used */
+#define EXT4_INDEX_FL                  0x00001000 /* hash-indexed directory */
+#define EXT4_IMAGIC_FL                 0x00002000 /* AFS directory */
+#define EXT4_JOURNAL_DATA_FL           0x00004000 /* file data should be journaled */
+#define EXT4_NOTAIL_FL                 0x00008000 /* file tail should not be merged */
+#define EXT4_DIRSYNC_FL                        0x00010000 /* dirsync behaviour (directories only) */
+#define EXT4_TOPDIR_FL                 0x00020000 /* Top of directory hierarchies*/
+#define EXT4_RESERVED_FL               0x80000000 /* reserved for ext4 lib */
+#define EXT4_EXTENTS_FL                        0x00080000 /* Inode uses extents */
+
+#define EXT4_FL_USER_VISIBLE           0x000BDFFF /* User visible flags */
+#define EXT4_FL_USER_MODIFIABLE                0x000380FF /* User modifiable flags */
+
+/*
+ * Inode dynamic state flags
+ */
+#define EXT4_STATE_JDATA               0x00000001 /* journaled data exists */
+#define EXT4_STATE_NEW                 0x00000002 /* inode is newly created */
+#define EXT4_STATE_XATTR               0x00000004 /* has in-inode xattrs */
+
+/* Used to pass group descriptor data when online resize is done */
+struct ext4_new_group_input {
+       __u32 group;            /* Group number for this data */
+       __u64 block_bitmap;     /* Absolute block number of block bitmap */
+       __u64 inode_bitmap;     /* Absolute block number of inode bitmap */
+       __u64 inode_table;      /* Absolute block number of inode table start */
+       __u32 blocks_count;     /* Total number of blocks in this group */
+       __u16 reserved_blocks;  /* Number of reserved blocks in this group */
+       __u16 unused;
+};
+
+/* The struct ext4_new_group_input in kernel space, with free_blocks_count */
+struct ext4_new_group_data {
+       __u32 group;
+       __u64 block_bitmap;
+       __u64 inode_bitmap;
+       __u64 inode_table;
+       __u32 blocks_count;
+       __u16 reserved_blocks;
+       __u16 unused;
+       __u32 free_blocks_count;
+};
+
+
+/*
+ * ioctl commands
+ */
+#define        EXT4_IOC_GETFLAGS               FS_IOC_GETFLAGS
+#define        EXT4_IOC_SETFLAGS               FS_IOC_SETFLAGS
+#define        EXT4_IOC_GETVERSION             _IOR('f', 3, long)
+#define        EXT4_IOC_SETVERSION             _IOW('f', 4, long)
+#define EXT4_IOC_GROUP_EXTEND          _IOW('f', 7, unsigned long)
+#define EXT4_IOC_GROUP_ADD             _IOW('f', 8,struct ext4_new_group_input)
+#define        EXT4_IOC_GETVERSION_OLD         FS_IOC_GETVERSION
+#define        EXT4_IOC_SETVERSION_OLD         FS_IOC_SETVERSION
+#ifdef CONFIG_JBD_DEBUG
+#define EXT4_IOC_WAIT_FOR_READONLY     _IOR('f', 99, long)
+#endif
+#define EXT4_IOC_GETRSVSZ              _IOR('f', 5, long)
+#define EXT4_IOC_SETRSVSZ              _IOW('f', 6, long)
+
+/*
+ * ioctl commands in 32 bit emulation
+ */
+#define EXT4_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
+#define EXT4_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
+#define EXT4_IOC32_GETVERSION          _IOR('f', 3, int)
+#define EXT4_IOC32_SETVERSION          _IOW('f', 4, int)
+#define EXT4_IOC32_GETRSVSZ            _IOR('f', 5, int)
+#define EXT4_IOC32_SETRSVSZ            _IOW('f', 6, int)
+#define EXT4_IOC32_GROUP_EXTEND                _IOW('f', 7, unsigned int)
+#ifdef CONFIG_JBD_DEBUG
+#define EXT4_IOC32_WAIT_FOR_READONLY   _IOR('f', 99, int)
+#endif
+#define EXT4_IOC32_GETVERSION_OLD      FS_IOC32_GETVERSION
+#define EXT4_IOC32_SETVERSION_OLD      FS_IOC32_SETVERSION
+
+
+/*
+ *  Mount options
+ */
+struct ext4_mount_options {
+       unsigned long s_mount_opt;
+       uid_t s_resuid;
+       gid_t s_resgid;
+       unsigned long s_commit_interval;
+#ifdef CONFIG_QUOTA
+       int s_jquota_fmt;
+       char *s_qf_names[MAXQUOTAS];
+#endif
+};
+
+/*
+ * Structure of an inode on the disk
+ */
+struct ext4_inode {
+       __le16  i_mode;         /* File mode */
+       __le16  i_uid;          /* Low 16 bits of Owner Uid */
+       __le32  i_size;         /* Size in bytes */
+       __le32  i_atime;        /* Access time */
+       __le32  i_ctime;        /* Creation time */
+       __le32  i_mtime;        /* Modification time */
+       __le32  i_dtime;        /* Deletion Time */
+       __le16  i_gid;          /* Low 16 bits of Group Id */
+       __le16  i_links_count;  /* Links count */
+       __le32  i_blocks;       /* Blocks count */
+       __le32  i_flags;        /* File flags */
+       union {
+               struct {
+                       __u32  l_i_reserved1;
+               } linux1;
+               struct {
+                       __u32  h_i_translator;
+               } hurd1;
+               struct {
+                       __u32  m_i_reserved1;
+               } masix1;
+       } osd1;                         /* OS dependent 1 */
+       __le32  i_block[EXT4_N_BLOCKS];/* Pointers to blocks */
+       __le32  i_generation;   /* File version (for NFS) */
+       __le32  i_file_acl;     /* File ACL */
+       __le32  i_dir_acl;      /* Directory ACL */
+       __le32  i_faddr;        /* Fragment address */
+       union {
+               struct {
+                       __u8    l_i_frag;       /* Fragment number */
+                       __u8    l_i_fsize;      /* Fragment size */
+                       __le16  l_i_file_acl_high;
+                       __le16  l_i_uid_high;   /* these 2 fields    */
+                       __le16  l_i_gid_high;   /* were reserved2[0] */
+                       __u32   l_i_reserved2;
+               } linux2;
+               struct {
+                       __u8    h_i_frag;       /* Fragment number */
+                       __u8    h_i_fsize;      /* Fragment size */
+                       __u16   h_i_mode_high;
+                       __u16   h_i_uid_high;
+                       __u16   h_i_gid_high;
+                       __u32   h_i_author;
+               } hurd2;
+               struct {
+                       __u8    m_i_frag;       /* Fragment number */
+                       __u8    m_i_fsize;      /* Fragment size */
+                       __le16  m_i_file_acl_high;
+                       __u32   m_i_reserved2[2];
+               } masix2;
+       } osd2;                         /* OS dependent 2 */
+       __le16  i_extra_isize;
+       __le16  i_pad1;
+};
+
+#define i_size_high    i_dir_acl
+
+#if defined(__KERNEL__) || defined(__linux__)
+#define i_reserved1    osd1.linux1.l_i_reserved1
+#define i_frag         osd2.linux2.l_i_frag
+#define i_fsize                osd2.linux2.l_i_fsize
+#define i_file_acl_high        osd2.linux2.l_i_file_acl_high
+#define i_uid_low      i_uid
+#define i_gid_low      i_gid
+#define i_uid_high     osd2.linux2.l_i_uid_high
+#define i_gid_high     osd2.linux2.l_i_gid_high
+#define i_reserved2    osd2.linux2.l_i_reserved2
+
+#elif defined(__GNU__)
+
+#define i_translator   osd1.hurd1.h_i_translator
+#define i_frag         osd2.hurd2.h_i_frag;
+#define i_fsize                osd2.hurd2.h_i_fsize;
+#define i_uid_high     osd2.hurd2.h_i_uid_high
+#define i_gid_high     osd2.hurd2.h_i_gid_high
+#define i_author       osd2.hurd2.h_i_author
+
+#elif defined(__masix__)
+
+#define i_reserved1    osd1.masix1.m_i_reserved1
+#define i_frag         osd2.masix2.m_i_frag
+#define i_fsize                osd2.masix2.m_i_fsize
+#define i_file_acl_high        osd2.masix2.m_i_file_acl_high
+#define i_reserved2    osd2.masix2.m_i_reserved2
+
+#endif /* defined(__KERNEL__) || defined(__linux__) */
+
+/*
+ * File system states
+ */
+#define        EXT4_VALID_FS                   0x0001  /* Unmounted cleanly */
+#define        EXT4_ERROR_FS                   0x0002  /* Errors detected */
+#define        EXT4_ORPHAN_FS                  0x0004  /* Orphans being recovered */
+
+/*
+ * Mount flags
+ */
+#define EXT4_MOUNT_CHECK               0x00001 /* Do mount-time checks */
+#define EXT4_MOUNT_OLDALLOC            0x00002  /* Don't use the new Orlov allocator */
+#define EXT4_MOUNT_GRPID               0x00004 /* Create files with directory's group */
+#define EXT4_MOUNT_DEBUG               0x00008 /* Some debugging messages */
+#define EXT4_MOUNT_ERRORS_CONT         0x00010 /* Continue on errors */
+#define EXT4_MOUNT_ERRORS_RO           0x00020 /* Remount fs ro on errors */
+#define EXT4_MOUNT_ERRORS_PANIC                0x00040 /* Panic on errors */
+#define EXT4_MOUNT_MINIX_DF            0x00080 /* Mimics the Minix statfs */
+#define EXT4_MOUNT_NOLOAD              0x00100 /* Don't use existing journal*/
+#define EXT4_MOUNT_ABORT               0x00200 /* Fatal error detected */
+#define EXT4_MOUNT_DATA_FLAGS          0x00C00 /* Mode for data writes: */
+#define EXT4_MOUNT_JOURNAL_DATA                0x00400 /* Write data to journal */
+#define EXT4_MOUNT_ORDERED_DATA                0x00800 /* Flush data before commit */
+#define EXT4_MOUNT_WRITEBACK_DATA      0x00C00 /* No data ordering */
+#define EXT4_MOUNT_UPDATE_JOURNAL      0x01000 /* Update the journal format */
+#define EXT4_MOUNT_NO_UID32            0x02000  /* Disable 32-bit UIDs */
+#define EXT4_MOUNT_XATTR_USER          0x04000 /* Extended user attributes */
+#define EXT4_MOUNT_POSIX_ACL           0x08000 /* POSIX Access Control Lists */
+#define EXT4_MOUNT_RESERVATION         0x10000 /* Preallocation */
+#define EXT4_MOUNT_BARRIER             0x20000 /* Use block barriers */
+#define EXT4_MOUNT_NOBH                        0x40000 /* No bufferheads */
+#define EXT4_MOUNT_QUOTA               0x80000 /* Some quota option set */
+#define EXT4_MOUNT_USRQUOTA            0x100000 /* "old" user quota */
+#define EXT4_MOUNT_GRPQUOTA            0x200000 /* "old" group quota */
+#define EXT4_MOUNT_EXTENTS             0x400000 /* Extents support */
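+
+/*
+ * Illustration only (hypothetical helper): EXT4_MOUNT_DATA_FLAGS (0x00C00)
+ * is a two-bit field rather than an independent flag -- JOURNAL_DATA
+ * (0x400), ORDERED_DATA (0x800) and WRITEBACK_DATA (0xC00) are the three
+ * values it can take, so data-mode tests mask first and then compare:
+ */
+static inline int ext4_mount_is_journalled_example(unsigned long mount_opt)
+{
+       /* contrast with a plain boolean bit such as EXT4_MOUNT_BARRIER */
+       return (mount_opt & EXT4_MOUNT_DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA;
+}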
+
+/* Compatibility, for having both ext2_fs.h and ext4_fs.h included at once */
+#ifndef _LINUX_EXT2_FS_H
+#define clear_opt(o, opt)              o &= ~EXT4_MOUNT_##opt
+#define set_opt(o, opt)                        o |= EXT4_MOUNT_##opt
+#define test_opt(sb, opt)              (EXT4_SB(sb)->s_mount_opt & \
+                                        EXT4_MOUNT_##opt)
+#else
+#define EXT2_MOUNT_NOLOAD              EXT4_MOUNT_NOLOAD
+#define EXT2_MOUNT_ABORT               EXT4_MOUNT_ABORT
+#define EXT2_MOUNT_DATA_FLAGS          EXT4_MOUNT_DATA_FLAGS
+#endif
+
+#define ext4_set_bit                   ext2_set_bit
+#define ext4_set_bit_atomic            ext2_set_bit_atomic
+#define ext4_clear_bit                 ext2_clear_bit
+#define ext4_clear_bit_atomic          ext2_clear_bit_atomic
+#define ext4_test_bit                  ext2_test_bit
+#define ext4_find_first_zero_bit       ext2_find_first_zero_bit
+#define ext4_find_next_zero_bit                ext2_find_next_zero_bit
+
+/*
+ * Maximal mount counts between two filesystem checks
+ */
+#define EXT4_DFL_MAX_MNT_COUNT         20      /* Allow 20 mounts */
+#define EXT4_DFL_CHECKINTERVAL         0       /* Don't use interval check */
+
+/*
+ * Behaviour when detecting errors
+ */
+#define EXT4_ERRORS_CONTINUE           1       /* Continue execution */
+#define EXT4_ERRORS_RO                 2       /* Remount fs read-only */
+#define EXT4_ERRORS_PANIC              3       /* Panic */
+#define EXT4_ERRORS_DEFAULT            EXT4_ERRORS_CONTINUE
+
+/*
+ * Structure of the super block
+ */
+struct ext4_super_block {
+/*00*/ __le32  s_inodes_count;         /* Inodes count */
+       __le32  s_blocks_count;         /* Blocks count */
+       __le32  s_r_blocks_count;       /* Reserved blocks count */
+       __le32  s_free_blocks_count;    /* Free blocks count */
+/*10*/ __le32  s_free_inodes_count;    /* Free inodes count */
+       __le32  s_first_data_block;     /* First Data Block */
+       __le32  s_log_block_size;       /* Block size */
+       __le32  s_log_frag_size;        /* Fragment size */
+/*20*/ __le32  s_blocks_per_group;     /* # Blocks per group */
+       __le32  s_frags_per_group;      /* # Fragments per group */
+       __le32  s_inodes_per_group;     /* # Inodes per group */
+       __le32  s_mtime;                /* Mount time */
+/*30*/ __le32  s_wtime;                /* Write time */
+       __le16  s_mnt_count;            /* Mount count */
+       __le16  s_max_mnt_count;        /* Maximal mount count */
+       __le16  s_magic;                /* Magic signature */
+       __le16  s_state;                /* File system state */
+       __le16  s_errors;               /* Behaviour when detecting errors */
+       __le16  s_minor_rev_level;      /* minor revision level */
+/*40*/ __le32  s_lastcheck;            /* time of last check */
+       __le32  s_checkinterval;        /* max. time between checks */
+       __le32  s_creator_os;           /* OS */
+       __le32  s_rev_level;            /* Revision level */
+/*50*/ __le16  s_def_resuid;           /* Default uid for reserved blocks */
+       __le16  s_def_resgid;           /* Default gid for reserved blocks */
+       /*
+        * These fields are for EXT4_DYNAMIC_REV superblocks only.
+        *
+        * Note: the difference between the compatible feature set and
+        * the incompatible feature set is that if there is a bit set
+        * in the incompatible feature set that the kernel doesn't
+        * know about, it should refuse to mount the filesystem.
+        *
+        * e2fsck's requirements are more strict; if it doesn't know
+        * about a feature in either the compatible or incompatible
+        * feature set, it must abort and not try to meddle with
+        * things it doesn't understand...
+        */
+       __le32  s_first_ino;            /* First non-reserved inode */
+       __le16  s_inode_size;           /* size of inode structure */
+       __le16  s_block_group_nr;       /* block group # of this superblock */
+       __le32  s_feature_compat;       /* compatible feature set */
+/*60*/ __le32  s_feature_incompat;     /* incompatible feature set */
+       __le32  s_feature_ro_compat;    /* readonly-compatible feature set */
+/*68*/ __u8    s_uuid[16];             /* 128-bit uuid for volume */
+/*78*/ char    s_volume_name[16];      /* volume name */
+/*88*/ char    s_last_mounted[64];     /* directory where last mounted */
+/*C8*/ __le32  s_algorithm_usage_bitmap; /* For compression */
+       /*
+        * Performance hints.  Directory preallocation should only
+        * happen if the EXT4_FEATURE_COMPAT_DIR_PREALLOC flag is on.
+        */
+       __u8    s_prealloc_blocks;      /* Nr of blocks to try to preallocate*/
+       __u8    s_prealloc_dir_blocks;  /* Nr to preallocate for dirs */
+       __le16  s_reserved_gdt_blocks;  /* Per group desc for online growth */
+       /*
+        * Journaling support valid if EXT4_FEATURE_COMPAT_HAS_JOURNAL set.
+        */
+/*D0*/ __u8    s_journal_uuid[16];     /* uuid of journal superblock */
+/*E0*/ __le32  s_journal_inum;         /* inode number of journal file */
+       __le32  s_journal_dev;          /* device number of journal file */
+       __le32  s_last_orphan;          /* start of list of inodes to delete */
+       __le32  s_hash_seed[4];         /* HTREE hash seed */
+       __u8    s_def_hash_version;     /* Default hash version to use */
+       __u8    s_reserved_char_pad;
+       __le16  s_desc_size;            /* size of group descriptor */
+/*100*/        __le32  s_default_mount_opts;
+       __le32  s_first_meta_bg;        /* First metablock block group */
+       __le32  s_mkfs_time;            /* When the filesystem was created */
+       __le32  s_jnl_blocks[17];       /* Backup of the journal inode */
+       /* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
+/*150*/        __le32  s_blocks_count_hi;      /* Blocks count */
+       __le32  s_r_blocks_count_hi;    /* Reserved blocks count */
+       __le32  s_free_blocks_count_hi; /* Free blocks count */
+       __u32   s_reserved[169];        /* Padding to the end of the block */
+};
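+
+/*
+ * Illustration only (hypothetical helper): the primary copy of this
+ * structure lives 1024 bytes from the start of the device, and s_magic
+ * holds 0xEF53, the magic shared with ext2/ext3.  A sketch of a validity
+ * check over an in-memory copy of the superblock:
+ */
+static inline int ext4_sb_magic_ok_example(const struct ext4_super_block *es)
+{
+       return le16_to_cpu(es->s_magic) == 0xEF53;
+}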
+
+#ifdef __KERNEL__
+static inline struct ext4_sb_info * EXT4_SB(struct super_block *sb)
+{
+       return sb->s_fs_info;
+}
+static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
+{
+       return container_of(inode, struct ext4_inode_info, vfs_inode);
+}
+
+static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
+{
+       return ino == EXT4_ROOT_INO ||
+               ino == EXT4_JOURNAL_INO ||
+               ino == EXT4_RESIZE_INO ||
+               (ino >= EXT4_FIRST_INO(sb) &&
+                ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
+}
+#else
+/* Assume that user mode programs are passing in an ext4fs superblock, not
+ * a kernel struct super_block.  This will allow us to call the feature-test
+ * macros from user land. */
+#define EXT4_SB(sb)    (sb)
+#endif
+
+#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
+
+/*
+ * Codes for operating systems
+ */
+#define EXT4_OS_LINUX          0
+#define EXT4_OS_HURD           1
+#define EXT4_OS_MASIX          2
+#define EXT4_OS_FREEBSD                3
+#define EXT4_OS_LITES          4
+
+/*
+ * Revision levels
+ */
+#define EXT4_GOOD_OLD_REV      0       /* The good old (original) format */
+#define EXT4_DYNAMIC_REV       1       /* V2 format w/ dynamic inode sizes */
+
+#define EXT4_CURRENT_REV       EXT4_GOOD_OLD_REV
+#define EXT4_MAX_SUPP_REV      EXT4_DYNAMIC_REV
+
+#define EXT4_GOOD_OLD_INODE_SIZE 128
+
+/*
+ * Feature set definitions
+ */
+
+#define EXT4_HAS_COMPAT_FEATURE(sb,mask)                       \
+       ( EXT4_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
+#define EXT4_HAS_RO_COMPAT_FEATURE(sb,mask)                    \
+       ( EXT4_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
+#define EXT4_HAS_INCOMPAT_FEATURE(sb,mask)                     \
+       ( EXT4_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
+#define EXT4_SET_COMPAT_FEATURE(sb,mask)                       \
+       EXT4_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
+#define EXT4_SET_RO_COMPAT_FEATURE(sb,mask)                    \
+       EXT4_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
+#define EXT4_SET_INCOMPAT_FEATURE(sb,mask)                     \
+       EXT4_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
+#define EXT4_CLEAR_COMPAT_FEATURE(sb,mask)                     \
+       EXT4_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
+#define EXT4_CLEAR_RO_COMPAT_FEATURE(sb,mask)                  \
+       EXT4_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
+#define EXT4_CLEAR_INCOMPAT_FEATURE(sb,mask)                   \
+       EXT4_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
+
+#define EXT4_FEATURE_COMPAT_DIR_PREALLOC       0x0001
+#define EXT4_FEATURE_COMPAT_IMAGIC_INODES      0x0002
+#define EXT4_FEATURE_COMPAT_HAS_JOURNAL                0x0004
+#define EXT4_FEATURE_COMPAT_EXT_ATTR           0x0008
+#define EXT4_FEATURE_COMPAT_RESIZE_INODE       0x0010
+#define EXT4_FEATURE_COMPAT_DIR_INDEX          0x0020
+
+#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER    0x0001
+#define EXT4_FEATURE_RO_COMPAT_LARGE_FILE      0x0002
+#define EXT4_FEATURE_RO_COMPAT_BTREE_DIR       0x0004
+
+#define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
+#define EXT4_FEATURE_INCOMPAT_FILETYPE         0x0002
+#define EXT4_FEATURE_INCOMPAT_RECOVER          0x0004 /* Needs recovery */
+#define EXT4_FEATURE_INCOMPAT_JOURNAL_DEV      0x0008 /* Journal device */
+#define EXT4_FEATURE_INCOMPAT_META_BG          0x0010
+#define EXT4_FEATURE_INCOMPAT_EXTENTS          0x0040 /* extents support */
+#define EXT4_FEATURE_INCOMPAT_64BIT            0x0080
+
+#define EXT4_FEATURE_COMPAT_SUPP       EXT2_FEATURE_COMPAT_EXT_ATTR
+#define EXT4_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXT4_FEATURE_INCOMPAT_RECOVER| \
+                                        EXT4_FEATURE_INCOMPAT_META_BG| \
+                                        EXT4_FEATURE_INCOMPAT_EXTENTS| \
+                                        EXT4_FEATURE_INCOMPAT_64BIT)
+#define EXT4_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
+
+/*
+ * Default values for user and/or group using reserved blocks
+ */
+#define        EXT4_DEF_RESUID         0
+#define        EXT4_DEF_RESGID         0
+
+/*
+ * Default mount options
+ */
+#define EXT4_DEFM_DEBUG                0x0001
+#define EXT4_DEFM_BSDGROUPS    0x0002
+#define EXT4_DEFM_XATTR_USER   0x0004
+#define EXT4_DEFM_ACL          0x0008
+#define EXT4_DEFM_UID16                0x0010
+#define EXT4_DEFM_JMODE                0x0060
+#define EXT4_DEFM_JMODE_DATA   0x0020
+#define EXT4_DEFM_JMODE_ORDERED        0x0040
+#define EXT4_DEFM_JMODE_WBACK  0x0060
+
+/*
+ * Structure of a directory entry
+ */
+#define EXT4_NAME_LEN 255
+
+struct ext4_dir_entry {
+       __le32  inode;                  /* Inode number */
+       __le16  rec_len;                /* Directory entry length */
+       __le16  name_len;               /* Name length */
+       char    name[EXT4_NAME_LEN];    /* File name */
+};
+
+/*
+ * The new version of the directory entry.  Since EXT4 structures are
+ * stored in Intel (little-endian) byte order, and the name_len field could never be
+ * bigger than 255 chars, it's safe to reclaim the extra byte for the
+ * file_type field.
+ */
+struct ext4_dir_entry_2 {
+       __le32  inode;                  /* Inode number */
+       __le16  rec_len;                /* Directory entry length */
+       __u8    name_len;               /* Name length */
+       __u8    file_type;
+       char    name[EXT4_NAME_LEN];    /* File name */
+};
+
+/*
+ * Ext4 directory file types.  Only the low 3 bits are used.  The
+ * other bits are reserved for now.
+ */
+#define EXT4_FT_UNKNOWN                0
+#define EXT4_FT_REG_FILE       1
+#define EXT4_FT_DIR            2
+#define EXT4_FT_CHRDEV         3
+#define EXT4_FT_BLKDEV         4
+#define EXT4_FT_FIFO           5
+#define EXT4_FT_SOCK           6
+#define EXT4_FT_SYMLINK                7
+
+#define EXT4_FT_MAX            8
+
+/*
+ * EXT4_DIR_PAD defines the directory entry boundaries
+ *
+ * NOTE: It must be a multiple of 4
+ */
+#define EXT4_DIR_PAD                   4
+#define EXT4_DIR_ROUND                 (EXT4_DIR_PAD - 1)
+#define EXT4_DIR_REC_LEN(name_len)     (((name_len) + 8 + EXT4_DIR_ROUND) & \
+                                        ~EXT4_DIR_ROUND)
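+/*
+ * Worked example (illustration only): the 8 above is the fixed part of
+ * ext4_dir_entry_2 (4-byte inode + 2-byte rec_len + 1-byte name_len +
+ * 1-byte file_type), and the rounding keeps entries 4-byte aligned, so
+ * EXT4_DIR_REC_LEN(5) = (5 + 8 + 3) & ~3 = 16, and
+ * EXT4_DIR_REC_LEN(1) = (1 + 8 + 3) & ~3 = 12.
+ */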
+/*
+ * Hash Tree Directory indexing
+ * (c) Daniel Phillips, 2001
+ */
+
+#ifdef CONFIG_EXT4_INDEX
+#define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
+                                           EXT4_FEATURE_COMPAT_DIR_INDEX) && \
+                   (EXT4_I(dir)->i_flags & EXT4_INDEX_FL))
+#define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
+#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
+#else
+#define is_dx(dir) 0
+#define EXT4_DIR_LINK_MAX(dir) ((dir)->i_nlink >= EXT4_LINK_MAX)
+#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2)
+#endif
+
+/* Legal values for the dx_root hash_version field: */
+
+#define DX_HASH_LEGACY         0
+#define DX_HASH_HALF_MD4       1
+#define DX_HASH_TEA            2
+
+#ifdef __KERNEL__
+
+/* hash info structure used by the directory hash */
+struct dx_hash_info
+{
+       u32             hash;
+       u32             minor_hash;
+       int             hash_version;
+       u32             *seed;
+};
+
+#define EXT4_HTREE_EOF 0x7fffffff
+
+/*
+ * Control parameters used by ext4_htree_next_block
+ */
+#define HASH_NB_ALWAYS         1
+
+
+/*
+ * Describe an inode's exact location on disk and in memory
+ */
+struct ext4_iloc
+{
+       struct buffer_head *bh;
+       unsigned long offset;
+       unsigned long block_group;
+};
+
+static inline struct ext4_inode *ext4_raw_inode(struct ext4_iloc *iloc)
+{
+       return (struct ext4_inode *) (iloc->bh->b_data + iloc->offset);
+}
+
+/*
+ * This structure is stuffed into the struct file's private_data field
+ * for directories.  It is where we put information so that we can do
+ * readdir operations in hash tree order.
+ */
+struct dir_private_info {
+       struct rb_root  root;
+       struct rb_node  *curr_node;
+       struct fname    *extra_fname;
+       loff_t          last_pos;
+       __u32           curr_hash;
+       __u32           curr_minor_hash;
+       __u32           next_hash;
+};
+
+/* calculate the first block number of the group */
+static inline ext4_fsblk_t
+ext4_group_first_block_no(struct super_block *sb, unsigned long group_no)
+{
+       return group_no * (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
+               le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+}
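+
+/*
+ * Worked example (illustration only): s_first_data_block is 1 on 1 KiB
+ * block filesystems (where the superblock occupies block 1) and 0 for
+ * larger block sizes, so on a 4 KiB filesystem with the default 32768
+ * blocks per group, ext4_group_first_block_no(sb, 5) == 5 * 32768 + 0
+ * == 163840.
+ */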
+
+/*
+ * Special error return code only used by dx_probe() and its callers.
+ */
+#define ERR_BAD_DX_DIR -75000
+
+void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
+                       unsigned long *blockgrpp, ext4_grpblk_t *offsetp);
+
+/*
+ * Function prototypes
+ */
+
+/*
+ * Ok, these declarations are also in <linux/kernel.h> but none of the
+ * ext4 source programs needs to include it, so they are duplicated here.
+ */
+# define NORET_TYPE    /**/
+# define ATTRIB_NORET  __attribute__((noreturn))
+# define NORET_AND     noreturn,
+
+/* balloc.c */
+extern unsigned int ext4_block_group(struct super_block *sb,
+                       ext4_fsblk_t blocknr);
+extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
+                       ext4_fsblk_t blocknr);
+extern int ext4_bg_has_super(struct super_block *sb, int group);
+extern unsigned long ext4_bg_num_gdb(struct super_block *sb, int group);
+extern ext4_fsblk_t ext4_new_block (handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t goal, int *errp);
+extern ext4_fsblk_t ext4_new_blocks (handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t goal, unsigned long *count, int *errp);
+extern void ext4_free_blocks (handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t block, unsigned long count);
+extern void ext4_free_blocks_sb (handle_t *handle, struct super_block *sb,
+                                ext4_fsblk_t block, unsigned long count,
+                               unsigned long *pdquot_freed_blocks);
+extern ext4_fsblk_t ext4_count_free_blocks (struct super_block *);
+extern void ext4_check_blocks_bitmap (struct super_block *);
+extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
+                                                   unsigned int block_group,
+                                                   struct buffer_head ** bh);
+extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
+extern void ext4_init_block_alloc_info(struct inode *);
+extern void ext4_rsv_window_add(struct super_block *sb, struct ext4_reserve_window_node *rsv);
+
+/* dir.c */
+extern int ext4_check_dir_entry(const char *, struct inode *,
+                               struct ext4_dir_entry_2 *,
+                               struct buffer_head *, unsigned long);
+extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
+                                   __u32 minor_hash,
+                                   struct ext4_dir_entry_2 *dirent);
+extern void ext4_htree_free_dir_info(struct dir_private_info *p);
+
+/* fsync.c */
+extern int ext4_sync_file (struct file *, struct dentry *, int);
+
+/* hash.c */
+extern int ext4fs_dirhash(const char *name, int len, struct
+                         dx_hash_info *hinfo);
+
+/* ialloc.c */
+extern struct inode * ext4_new_inode (handle_t *, struct inode *, int);
+extern void ext4_free_inode (handle_t *, struct inode *);
+extern struct inode * ext4_orphan_get (struct super_block *, unsigned long);
+extern unsigned long ext4_count_free_inodes (struct super_block *);
+extern unsigned long ext4_count_dirs (struct super_block *);
+extern void ext4_check_inodes_bitmap (struct super_block *);
+extern unsigned long ext4_count_free (struct buffer_head *, unsigned);
+
+
+/* inode.c */
+int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
+               struct buffer_head *bh, ext4_fsblk_t blocknr);
+struct buffer_head * ext4_getblk (handle_t *, struct inode *, long, int, int *);
+struct buffer_head * ext4_bread (handle_t *, struct inode *, int, int, int *);
+int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
+       sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
+       int create, int extend_disksize);
+
+extern void ext4_read_inode (struct inode *);
+extern int  ext4_write_inode (struct inode *, int);
+extern int  ext4_setattr (struct dentry *, struct iattr *);
+extern void ext4_delete_inode (struct inode *);
+extern int  ext4_sync_inode (handle_t *, struct inode *);
+extern void ext4_discard_reservation (struct inode *);
+extern void ext4_dirty_inode(struct inode *);
+extern int ext4_change_inode_journal_flag(struct inode *, int);
+extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern void ext4_truncate (struct inode *);
+extern void ext4_set_inode_flags(struct inode *);
+extern void ext4_set_aops(struct inode *inode);
+extern int ext4_writepage_trans_blocks(struct inode *);
+extern int ext4_block_truncate_page(handle_t *handle, struct page *page,
+               struct address_space *mapping, loff_t from);
+
+/* ioctl.c */
+extern int ext4_ioctl (struct inode *, struct file *, unsigned int,
+                      unsigned long);
+extern long ext4_compat_ioctl (struct file *, unsigned int, unsigned long);
+
+/* namei.c */
+extern int ext4_orphan_add(handle_t *, struct inode *);
+extern int ext4_orphan_del(handle_t *, struct inode *);
+extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
+                               __u32 start_minor_hash, __u32 *next_hash);
+
+/* resize.c */
+extern int ext4_group_add(struct super_block *sb,
+                               struct ext4_new_group_data *input);
+extern int ext4_group_extend(struct super_block *sb,
+                               struct ext4_super_block *es,
+                               ext4_fsblk_t n_blocks_count);
+
+/* super.c */
+extern void ext4_error (struct super_block *, const char *, const char *, ...)
+       __attribute__ ((format (printf, 3, 4)));
+extern void __ext4_std_error (struct super_block *, const char *, int);
+extern void ext4_abort (struct super_block *, const char *, const char *, ...)
+       __attribute__ ((format (printf, 3, 4)));
+extern void ext4_warning (struct super_block *, const char *, const char *, ...)
+       __attribute__ ((format (printf, 3, 4)));
+extern void ext4_update_dynamic_rev (struct super_block *sb);
+extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
+                                     struct ext4_group_desc *bg);
+extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
+                                     struct ext4_group_desc *bg);
+extern ext4_fsblk_t ext4_inode_table(struct super_block *sb,
+                                    struct ext4_group_desc *bg);
+extern void ext4_block_bitmap_set(struct super_block *sb,
+                                 struct ext4_group_desc *bg, ext4_fsblk_t blk);
+extern void ext4_inode_bitmap_set(struct super_block *sb,
+                                 struct ext4_group_desc *bg, ext4_fsblk_t blk);
+extern void ext4_inode_table_set(struct super_block *sb,
+                                struct ext4_group_desc *bg, ext4_fsblk_t blk);
+
+static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
+{
+       return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
+               le32_to_cpu(es->s_blocks_count);
+}
+
+static inline ext4_fsblk_t ext4_r_blocks_count(struct ext4_super_block *es)
+{
+       return ((ext4_fsblk_t)le32_to_cpu(es->s_r_blocks_count_hi) << 32) |
+               le32_to_cpu(es->s_r_blocks_count);
+}
+
+static inline ext4_fsblk_t ext4_free_blocks_count(struct ext4_super_block *es)
+{
+       return ((ext4_fsblk_t)le32_to_cpu(es->s_free_blocks_count_hi) << 32) |
+               le32_to_cpu(es->s_free_blocks_count);
+}
+
+static inline void ext4_blocks_count_set(struct ext4_super_block *es,
+                                        ext4_fsblk_t blk)
+{
+       es->s_blocks_count = cpu_to_le32((u32)blk);
+       es->s_blocks_count_hi = cpu_to_le32(blk >> 32);
+}
+
+static inline void ext4_free_blocks_count_set(struct ext4_super_block *es,
+                                             ext4_fsblk_t blk)
+{
+       es->s_free_blocks_count = cpu_to_le32((u32)blk);
+       es->s_free_blocks_count_hi = cpu_to_le32(blk >> 32);
+}
+
+static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
+                                          ext4_fsblk_t blk)
+{
+       es->s_r_blocks_count = cpu_to_le32((u32)blk);
+       es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
+}
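+
+/*
+ * Worked example (illustration only): with EXT4_FEATURE_INCOMPAT_64BIT,
+ * each count is split across a low and a high __le32.  For an assumed
+ * value blk = 0x123456789ULL, ext4_blocks_count_set() stores
+ * s_blocks_count = cpu_to_le32(0x23456789) (low 32 bits) and
+ * s_blocks_count_hi = cpu_to_le32(0x1) (bits 32..63), and
+ * ext4_blocks_count() reassembles 0x123456789.
+ */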
+
+
+
+#define ext4_std_error(sb, errno)                              \
+do {                                                           \
+       if ((errno))                                            \
+               __ext4_std_error((sb), __FUNCTION__, (errno));  \
+} while (0)
+
+/*
+ * Inodes and files operations
+ */
+
+/* dir.c */
+extern const struct file_operations ext4_dir_operations;
+
+/* file.c */
+extern struct inode_operations ext4_file_inode_operations;
+extern const struct file_operations ext4_file_operations;
+
+/* namei.c */
+extern struct inode_operations ext4_dir_inode_operations;
+extern struct inode_operations ext4_special_inode_operations;
+
+/* symlink.c */
+extern struct inode_operations ext4_symlink_inode_operations;
+extern struct inode_operations ext4_fast_symlink_inode_operations;
+
+/* extents.c */
+extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
+extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
+extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+                       ext4_fsblk_t iblock,
+                       unsigned long max_blocks, struct buffer_head *bh_result,
+                       int create, int extend_disksize);
+extern void ext4_ext_truncate(struct inode *, struct page *);
+extern void ext4_ext_init(struct super_block *);
+extern void ext4_ext_release(struct super_block *);
+static inline int
+ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
+                       unsigned long max_blocks, struct buffer_head *bh,
+                       int create, int extend_disksize)
+{
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_get_blocks(handle, inode, block, max_blocks,
+                                       bh, create, extend_disksize);
+       return ext4_get_blocks_handle(handle, inode, block, max_blocks, bh,
+                                       create, extend_disksize);
+}
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_EXT4_FS_H */
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h
new file mode 100644 (file)
index 0000000..a41cc24
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
+ * Written by Alex Tomas <alex@clusterfs.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA.
+ */
+
+#ifndef _LINUX_EXT4_EXTENTS
+#define _LINUX_EXT4_EXTENTS
+
+#include <linux/ext4_fs.h>
+
+/*
+ * With AGRESSIVE_TEST defined, the capacity of index/leaf blocks
+ * becomes very small, so index splits, in-depth tree growth and
+ * other hard changes happen much more often.
+ * This is for debug purposes only.
+ */
+#define AGRESSIVE_TEST_
+
+/*
+ * With EXTENTS_STATS defined, the number of blocks and extents
+ * are collected in the truncate path. They'll be shown at
+ * umount time.
+ */
+#define EXTENTS_STATS__
+
+/*
+ * If CHECK_BINSEARCH is defined, then the results of the binary search
+ * will also be checked by linear search.
+ */
+#define CHECK_BINSEARCH__
+
+/*
+ * If EXT_DEBUG is defined you can use the 'extdebug' mount option
+ * to get lots of info about what's going on.
+ */
+#define EXT_DEBUG__
+#ifdef EXT_DEBUG
+#define ext_debug(a...)                printk(a)
+#else
+#define ext_debug(a...)
+#endif
+
+/*
+ * If EXT_STATS is defined then stats numbers are collected.
+ * These numbers will be displayed at umount time.
+ */
+#define EXT_STATS_
+
+
+/*
+ * ext4_inode has i_block array (60 bytes total).
+ * The first 12 bytes store ext4_extent_header;
+ * the remainder stores an array of ext4_extent.
+ */
+
+/*
+ * This is the extent on-disk structure.
+ * It's used at the bottom of the tree.
+ */
+struct ext4_extent {
+       __le32  ee_block;       /* first logical block extent covers */
+       __le16  ee_len;         /* number of blocks covered by extent */
+       __le16  ee_start_hi;    /* high 16 bits of physical block */
+       __le32  ee_start;       /* low 32 bits of physical block */
+};
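+
+/*
+ * Illustration only (hypothetical helper): ee_start_hi widens the physical
+ * block number to 48 bits.  Since i_block is 60 bytes and the header and
+ * each extent are 12 bytes apiece, an inode holds at most four extents
+ * in-line before the tree grows a level.  A sketch of assembling the full
+ * physical block number:
+ */
+static inline ext4_fsblk_t ext4_ext_pblock_example(struct ext4_extent *ex)
+{
+       /* low 32 bits in ee_start, high 16 bits in ee_start_hi */
+       return ((ext4_fsblk_t)le16_to_cpu(ex->ee_start_hi) << 32) |
+               le32_to_cpu(ex->ee_start);
+}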
+
+/*
+ * This is the index on-disk structure.
+ * It's used at all the levels except the bottom.
+ */
+struct ext4_extent_idx {
+       __le32  ei_block;       /* index covers logical blocks from 'block' */
+       __le32  ei_leaf;        /* pointer to the physical block of the next *
+                                * level; a leaf or the next index could be there */
+       __le16  ei_leaf_hi;     /* high 16 bits of physical block */
+       __u16   ei_unused;
+};
+
+/*
+ * Each block (leaves and indexes), even the inode-stored one, has a header.
+ */
+struct ext4_extent_header {
+       __le16  eh_magic;       /* probably will support different formats */
+       __le16  eh_entries;     /* number of valid entries */
+       __le16  eh_max;         /* capacity of store in entries */
+       __le16  eh_depth;       /* does the tree have real underlying blocks? */
+       __le32  eh_generation;  /* generation of the tree */
+};
+
+#define EXT4_EXT_MAGIC         cpu_to_le16(0xf30a)
+
+/*
+ * An array of ext4_ext_path structures contains the path to some extent.
+ * Creation/lookup routines use it for traversal/splitting/etc.
+ * Truncate uses it to simulate recursive walking.
+ */
+struct ext4_ext_path {
+       ext4_fsblk_t                    p_block;
+       __u16                           p_depth;
+       struct ext4_extent              *p_ext;
+       struct ext4_extent_idx          *p_idx;
+       struct ext4_extent_header       *p_hdr;
+       struct buffer_head              *p_bh;
+};
+
+/*
+ * structure for external API
+ */
+
+#define EXT4_EXT_CACHE_NO      0
+#define EXT4_EXT_CACHE_GAP     1
+#define EXT4_EXT_CACHE_EXTENT  2
+
+/*
+ * Called by ext4_ext_walk_space():
+ * negative retcode - error
+ * positive retcode - signal for ext4_ext_walk_space(), see below
+ * The callback must return a valid extent (passed or newly created).
+ */
+typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *,
+                                       struct ext4_ext_cache *,
+                                       void *);
+
+#define EXT_CONTINUE   0
+#define EXT_BREAK      1
+#define EXT_REPEAT     2
+
+
+#define EXT_MAX_BLOCK  0xffffffff
+
+#define EXT_MAX_LEN    ((1UL << 15) - 1)
+
+
+#define EXT_FIRST_EXTENT(__hdr__) \
+       ((struct ext4_extent *) (((char *) (__hdr__)) +         \
+                                sizeof(struct ext4_extent_header)))
+#define EXT_FIRST_INDEX(__hdr__) \
+       ((struct ext4_extent_idx *) (((char *) (__hdr__)) +     \
+                                    sizeof(struct ext4_extent_header)))
+#define EXT_HAS_FREE_INDEX(__path__) \
+        (le16_to_cpu((__path__)->p_hdr->eh_entries) \
+                                    < le16_to_cpu((__path__)->p_hdr->eh_max))
+#define EXT_LAST_EXTENT(__hdr__) \
+       (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
+#define EXT_LAST_INDEX(__hdr__) \
+       (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
+#define EXT_MAX_EXTENT(__hdr__) \
+       (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
+#define EXT_MAX_INDEX(__hdr__) \
+       (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
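+
+/*
+ * Illustration only (hypothetical helper): these macros make leaf
+ * traversal a plain pointer walk over the entries behind the header.
+ */
+static inline void ext4_ext_dump_leaf_example(struct ext4_extent_header *eh)
+{
+       struct ext4_extent *ex;
+
+       for (ex = EXT_FIRST_EXTENT(eh); ex <= EXT_LAST_EXTENT(eh); ex++)
+               printk(KERN_DEBUG "logical %u, len %u\n",
+                      le32_to_cpu(ex->ee_block),
+                      (unsigned int)le16_to_cpu(ex->ee_len));
+}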
+
+static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode)
+{
+       return (struct ext4_extent_header *) EXT4_I(inode)->i_data;
+}
+
+static inline struct ext4_extent_header *ext_block_hdr(struct buffer_head *bh)
+{
+       return (struct ext4_extent_header *) bh->b_data;
+}
+
+static inline unsigned short ext_depth(struct inode *inode)
+{
+       return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
+}
+
+static inline void ext4_ext_tree_changed(struct inode *inode)
+{
+       EXT4_I(inode)->i_ext_generation++;
+}
+
+static inline void
+ext4_ext_invalidate_cache(struct inode *inode)
+{
+       EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
+}
+
+extern int ext4_extent_tree_init(handle_t *, struct inode *);
+extern int ext4_ext_calc_credits_for_insert(struct inode *, struct ext4_ext_path *);
+extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *);
+extern int ext4_ext_walk_space(struct inode *, unsigned long, unsigned long, ext_prepare_callback, void *);
+extern struct ext4_ext_path * ext4_ext_find_extent(struct inode *, int, struct ext4_ext_path *);
+
+#endif /* _LINUX_EXT4_EXTENTS */
+
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
new file mode 100644 (file)
index 0000000..bb42379
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ *  linux/include/linux/ext4_fs_i.h
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/include/linux/minix_fs_i.h
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#ifndef _LINUX_EXT4_FS_I
+#define _LINUX_EXT4_FS_I
+
+#include <linux/rwsem.h>
+#include <linux/rbtree.h>
+#include <linux/seqlock.h>
+#include <linux/mutex.h>
+
+/* data type for block offset of block group */
+typedef int ext4_grpblk_t;
+
+/* data type for filesystem-wide blocks number */
+typedef unsigned long long ext4_fsblk_t;
+
+struct ext4_reserve_window {
+       ext4_fsblk_t    _rsv_start;     /* First byte reserved */
+       ext4_fsblk_t    _rsv_end;       /* Last byte reserved or 0 */
+};
+
+struct ext4_reserve_window_node {
+       struct rb_node          rsv_node;
+       __u32                   rsv_goal_size;
+       __u32                   rsv_alloc_hit;
+       struct ext4_reserve_window      rsv_window;
+};
+
+struct ext4_block_alloc_info {
+       /* information about reservation window */
+       struct ext4_reserve_window_node rsv_window_node;
+       /*
+        * Was i_next_alloc_block in ext4_inode_info.
+        * It is the logical (file-relative) number of the
+        * most-recently-allocated block in this file.
+        * We use this for detecting linearly ascending allocation requests.
+        */
+       __u32                   last_alloc_logical_block;
+       /*
+        * Was i_next_alloc_goal in ext4_inode_info.
+        * It is the *physical* companion to i_next_alloc_block:
+        * the physical block number of the block which was most recently
+        * allocated to this file.  This gives us the goal (target) for the next
+        * allocation when we detect linearly ascending requests.
+        */
+       ext4_fsblk_t            last_alloc_physical_block;
+};
+
+#define rsv_start rsv_window._rsv_start
+#define rsv_end rsv_window._rsv_end
+
+/*
+ * storage for cached extent
+ */
+struct ext4_ext_cache {
+       ext4_fsblk_t    ec_start;
+       __u32           ec_block;
+       __u32           ec_len; /* must be 32bit to return holes */
+       __u32           ec_type;
+};
+
+/*
+ * fourth extended file system inode data in memory
+ */
+struct ext4_inode_info {
+       __le32  i_data[15];     /* unconverted */
+       __u32   i_flags;
+#ifdef EXT4_FRAGMENTS
+       __u32   i_faddr;
+       __u8    i_frag_no;
+       __u8    i_frag_size;
+#endif
+       ext4_fsblk_t    i_file_acl;
+       __u32   i_dir_acl;
+       __u32   i_dtime;
+
+       /*
+        * i_block_group is the number of the block group which contains
+        * this file's inode.  Constant across the lifetime of the inode,
+        * it is used for making block allocation decisions - we try to
+        * place a file's data blocks near its inode block, and new inodes
+        * near to their parent directory's inode.
+        */
+       __u32   i_block_group;
+       __u32   i_state;                /* Dynamic state flags for ext4 */
+
+       /* block reservation info */
+       struct ext4_block_alloc_info *i_block_alloc_info;
+
+       __u32   i_dir_start_lookup;
+#ifdef CONFIG_EXT4DEV_FS_XATTR
+       /*
+        * Extended attributes can be read independently of the main file
+        * data. Taking i_mutex even when reading would cause contention
+        * between readers of EAs and writers of regular file data, so
+        * instead we synchronize on xattr_sem when reading or changing
+        * EAs.
+        */
+       struct rw_semaphore xattr_sem;
+#endif
+#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
+       struct posix_acl        *i_acl;
+       struct posix_acl        *i_default_acl;
+#endif
+
+       struct list_head i_orphan;      /* unlinked but open inodes */
+
+       /*
+        * i_disksize keeps track of what the inode size is ON DISK, not
+        * in memory.  During truncate, i_size is set to the new size by
+        * the VFS prior to calling ext4_truncate(), but the filesystem won't
+        * set i_disksize to 0 until the truncate is actually under way.
+        *
+        * The intent is that i_disksize always represents the blocks which
+        * are used by this file.  This allows recovery to restart truncate
+        * on orphans if we crash during truncate.  We actually write i_disksize
+        * into the on-disk inode when writing inodes out, instead of i_size.
+        *
+        * The only time when i_disksize and i_size may be different is when
+        * a truncate is in progress.  The only things which change i_disksize
+        * are ext4_get_block (growth) and ext4_truncate (shrinkth).
+        */
+       loff_t  i_disksize;
+
+       /* on-disk additional length */
+       __u16 i_extra_isize;
+
+       /*
+        * truncate_mutex is for serialising ext4_truncate() against
+        * ext4_getblock().  In the 2.4 ext2 design, great chunks of inode's
+        * data tree are chopped off during truncate. We can't do that in
+        * ext4 because whenever we perform intermediate commits during
+        * truncate, the inode and all the metadata blocks *must* be in a
+        * consistent state which allows truncation of the orphans to restart
+        * during recovery.  Hence we must fix the get_block-vs-truncate race
+        * by other means, so we have truncate_mutex.
+        */
+       struct mutex truncate_mutex;
+       struct inode vfs_inode;
+
+       unsigned long i_ext_generation;
+       struct ext4_ext_cache i_cached_extent;
+};
+
+#endif /* _LINUX_EXT4_FS_I */
diff --git a/include/linux/ext4_fs_sb.h b/include/linux/ext4_fs_sb.h
new file mode 100644 (file)
index 0000000..691a713
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ *  linux/include/linux/ext4_fs_sb.h
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ *  from
+ *
+ *  linux/include/linux/minix_fs_sb.h
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#ifndef _LINUX_EXT4_FS_SB
+#define _LINUX_EXT4_FS_SB
+
+#ifdef __KERNEL__
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/blockgroup_lock.h>
+#include <linux/percpu_counter.h>
+#endif
+#include <linux/rbtree.h>
+
+/*
+ * fourth extended-fs super-block data in memory
+ */
+struct ext4_sb_info {
+       unsigned long s_frag_size;      /* Size of a fragment in bytes */
+       unsigned long s_desc_size;      /* Size of a group descriptor in bytes */
+       unsigned long s_frags_per_block;/* Number of fragments per block */
+       unsigned long s_inodes_per_block;/* Number of inodes per block */
+       unsigned long s_frags_per_group;/* Number of fragments in a group */
+       unsigned long s_blocks_per_group;/* Number of blocks in a group */
+       unsigned long s_inodes_per_group;/* Number of inodes in a group */
+       unsigned long s_itb_per_group;  /* Number of inode table blocks per group */
+       unsigned long s_gdb_count;      /* Number of group descriptor blocks */
+       unsigned long s_desc_per_block; /* Number of group descriptors per block */
+       unsigned long s_groups_count;   /* Number of groups in the fs */
+       struct buffer_head * s_sbh;     /* Buffer containing the super block */
+       struct ext4_super_block * s_es; /* Pointer to the super block in the buffer */
+       struct buffer_head ** s_group_desc;
+       unsigned long  s_mount_opt;
+       uid_t s_resuid;
+       gid_t s_resgid;
+       unsigned short s_mount_state;
+       unsigned short s_pad;
+       int s_addr_per_block_bits;
+       int s_desc_per_block_bits;
+       int s_inode_size;
+       int s_first_ino;
+       spinlock_t s_next_gen_lock;
+       u32 s_next_generation;
+       u32 s_hash_seed[4];
+       int s_def_hash_version;
+       struct percpu_counter s_freeblocks_counter;
+       struct percpu_counter s_freeinodes_counter;
+       struct percpu_counter s_dirs_counter;
+       struct blockgroup_lock s_blockgroup_lock;
+
+       /* root of the per fs reservation window tree */
+       spinlock_t s_rsv_window_lock;
+       struct rb_root s_rsv_window_root;
+       struct ext4_reserve_window_node s_rsv_window_head;
+
+       /* Journaling */
+       struct inode * s_journal_inode;
+       struct journal_s * s_journal;
+       struct list_head s_orphan;
+       unsigned long s_commit_interval;
+       struct block_device *journal_bdev;
+#ifdef CONFIG_JBD_DEBUG
+       struct timer_list turn_ro_timer;        /* For turning read-only (crash simulation) */
+       wait_queue_head_t ro_wait_queue;        /* For people waiting for the fs to go read-only */
+#endif
+#ifdef CONFIG_QUOTA
+       char *s_qf_names[MAXQUOTAS];            /* Names of quota files with journalled quota */
+       int s_jquota_fmt;                       /* Format of quota to use */
+#endif
+
+#ifdef EXTENTS_STATS
+       /* ext4 extents stats */
+       unsigned long s_ext_min;
+       unsigned long s_ext_max;
+       unsigned long s_depth_max;
+       spinlock_t s_ext_stats_lock;
+       unsigned long s_ext_blocks;
+       unsigned long s_ext_extents;
+#endif
+};
+
+#endif /* _LINUX_EXT4_FS_SB */
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
new file mode 100644 (file)
index 0000000..72dd631
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * linux/include/linux/ext4_jbd2.h
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
+ *
+ * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Ext4-specific journaling extensions.
+ */
+
+#ifndef _LINUX_EXT4_JBD_H
+#define _LINUX_EXT4_JBD_H
+
+#include <linux/fs.h>
+#include <linux/jbd2.h>
+#include <linux/ext4_fs.h>
+
+#define EXT4_JOURNAL(inode)    (EXT4_SB((inode)->i_sb)->s_journal)
+
+/* Define the number of blocks we need to account to a transaction to
+ * modify one block of data.
+ *
+ * We may have to touch one inode, one bitmap buffer, up to three
+ * indirection blocks, the group and superblock summaries, and the data
+ * block to complete the transaction.
+ *
+ * For extents-enabled fs we may have to allocate and modify up to
+ * 5 levels of tree + root which are stored in the inode. */
+
+#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)                               \
+       (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)   \
+               || test_opt(sb, EXTENTS) ? 27U : 8U)
+
+/* Extended attribute operations touch at most two data buffers,
+ * two bitmap buffers, and two group summaries, in addition to the inode
+ * and the superblock, which are already accounted for. */
+
+#define EXT4_XATTR_TRANS_BLOCKS                6U
+
+/* Define the minimum size for a transaction which modifies data.  This
+ * needs to take into account the fact that we may end up modifying two
+ * quota files too (one for the group, one for the user quota).  The
+ * superblock only gets updated once, of course, so don't bother
+ * counting that again for the quota updates. */
+
+#define EXT4_DATA_TRANS_BLOCKS(sb)     (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
+                                        EXT4_XATTR_TRANS_BLOCKS - 2 + \
+                                        2*EXT4_QUOTA_TRANS_BLOCKS(sb))
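+
+/*
+ * Worked example (illustration only): on a non-extents filesystem with
+ * quota disabled, EXT4_SINGLEDATA_TRANS_BLOCKS is 8,
+ * EXT4_XATTR_TRANS_BLOCKS is 6 and EXT4_QUOTA_TRANS_BLOCKS is 0, so
+ * EXT4_DATA_TRANS_BLOCKS(sb) == 8 + 6 - 2 + 2*0 == 12 credits.
+ */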
+
+/* Delete operations potentially hit one directory's namespace plus an
+ * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
+ * generous.  We can grow the delete transaction later if necessary. */
+
+#define EXT4_DELETE_TRANS_BLOCKS(sb)   (2 * EXT4_DATA_TRANS_BLOCKS(sb) + 64)
+
+/* Define an arbitrary limit for the amount of data we will anticipate
+ * writing to any given transaction.  For unbounded transactions such as
+ * write(2) and truncate(2) we can write more than this, but we always
+ * start off at the maximum transaction size and grow the transaction
+ * optimistically as we go. */
+
+#define EXT4_MAX_TRANS_DATA            64U
+
+/* We break up a large truncate or write transaction once the handle's
+ * buffer credits gets this low, we need either to extend the
+ * transaction or to start a new one.  Reserve enough space here for
+ * inode, bitmap, superblock, group and indirection updates for at least
+ * one block, plus two quota updates.  Quota allocations are not
+ * needed. */
+
+#define EXT4_RESERVE_TRANS_BLOCKS      12U
+
+#define EXT4_INDEX_EXTRA_TRANS_BLOCKS  8
+
+#ifdef CONFIG_QUOTA
+/* Amount of blocks needed for quota update - we know that the structure was
+ * allocated so we need to update only inode+data */
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+/* Amount of blocks needed for quota insert/delete - we do some block writes
+ * but inode, sb and group updates are done only once */
+#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
+               (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)
+#define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
+               (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
+#else
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
+#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
+#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
+#endif
+
+int
+ext4_mark_iloc_dirty(handle_t *handle,
+                    struct inode *inode,
+                    struct ext4_iloc *iloc);
+
+/*
+ * On success, we end up with an outstanding reference count against
+ * iloc->bh.  This _must_ be cleaned up later.
+ */
+
+int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
+                       struct ext4_iloc *iloc);
+
+int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
+
+/*
+ * Wrapper functions with which ext4 calls into JBD.  The intent here is
+ * to allow these to be turned into appropriate stubs so ext4 can control
+ * ext2 filesystems, so ext2+ext4 systems need only one fs.  This work hasn't
+ * been done yet.
+ */
+
+void ext4_journal_abort_handle(const char *caller, const char *err_fn,
+               struct buffer_head *bh, handle_t *handle, int err);
+
+static inline int
+__ext4_journal_get_undo_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = jbd2_journal_get_undo_access(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+static inline int
+__ext4_journal_get_write_access(const char *where, handle_t *handle,
+                               struct buffer_head *bh)
+{
+       int err = jbd2_journal_get_write_access(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+static inline void
+ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
+{
+       jbd2_journal_release_buffer(handle, bh);
+}
+
+static inline int
+__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
+{
+       int err = jbd2_journal_forget(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+static inline int
+__ext4_journal_revoke(const char *where, handle_t *handle,
+                     ext4_fsblk_t blocknr, struct buffer_head *bh)
+{
+       int err = jbd2_journal_revoke(handle, blocknr, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+static inline int
+__ext4_journal_get_create_access(const char *where,
+                                handle_t *handle, struct buffer_head *bh)
+{
+       int err = jbd2_journal_get_create_access(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+static inline int
+__ext4_journal_dirty_metadata(const char *where,
+                             handle_t *handle, struct buffer_head *bh)
+{
+       int err = jbd2_journal_dirty_metadata(handle, bh);
+       if (err)
+               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+       return err;
+}
+
+
+#define ext4_journal_get_undo_access(handle, bh) \
+       __ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh))
+#define ext4_journal_get_write_access(handle, bh) \
+       __ext4_journal_get_write_access(__FUNCTION__, (handle), (bh))
+#define ext4_journal_revoke(handle, blocknr, bh) \
+       __ext4_journal_revoke(__FUNCTION__, (handle), (blocknr), (bh))
+#define ext4_journal_get_create_access(handle, bh) \
+       __ext4_journal_get_create_access(__FUNCTION__, (handle), (bh))
+#define ext4_journal_dirty_metadata(handle, bh) \
+       __ext4_journal_dirty_metadata(__FUNCTION__, (handle), (bh))
+#define ext4_journal_forget(handle, bh) \
+       __ext4_journal_forget(__FUNCTION__, (handle), (bh))
+
+int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
+
+handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
+int __ext4_journal_stop(const char *where, handle_t *handle);
+
+static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
+{
+       return ext4_journal_start_sb(inode->i_sb, nblocks);
+}
+
+#define ext4_journal_stop(handle) \
+       __ext4_journal_stop(__FUNCTION__, (handle))
+
+static inline handle_t *ext4_journal_current_handle(void)
+{
+       return journal_current_handle();
+}
+
+static inline int ext4_journal_extend(handle_t *handle, int nblocks)
+{
+       return jbd2_journal_extend(handle, nblocks);
+}
+
+static inline int ext4_journal_restart(handle_t *handle, int nblocks)
+{
+       return jbd2_journal_restart(handle, nblocks);
+}
+
+static inline int ext4_journal_blocks_per_page(struct inode *inode)
+{
+       return jbd2_journal_blocks_per_page(inode);
+}
+
+static inline int ext4_journal_force_commit(journal_t *journal)
+{
+       return jbd2_journal_force_commit(journal);
+}
+
+/* super.c */
+int ext4_force_commit(struct super_block *sb);
+
+static inline int ext4_should_journal_data(struct inode *inode)
+{
+       if (!S_ISREG(inode->i_mode))
+               return 1;
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+               return 1;
+       if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
+               return 1;
+       return 0;
+}
+
+static inline int ext4_should_order_data(struct inode *inode)
+{
+       if (!S_ISREG(inode->i_mode))
+               return 0;
+       if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
+               return 0;
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+               return 1;
+       return 0;
+}
+
+static inline int ext4_should_writeback_data(struct inode *inode)
+{
+       if (!S_ISREG(inode->i_mode))
+               return 0;
+       if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
+               return 0;
+       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
+               return 1;
+       return 0;
+}
+
+#endif /* _LINUX_EXT4_JBD_H */
index 34406ed467c352c4ac4ad67b490c0729107c967d..661c7c572149207a8c3b237f8f0504d446418446 100644 (file)
@@ -656,7 +656,11 @@ static inline loff_t i_size_read(struct inode *inode)
 #endif
 }
 
-
+/*
+ * NOTE: unlike i_size_read(), i_size_write() does need locking around it
+ * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
+ * can be lost, resulting in subsequent i_size_read() calls spinning forever.
+ */
 static inline void i_size_write(struct inode *inode, loff_t i_size)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
index c25a38d8f600df00e8f852572d20b266c8cc7a40..5081d27bfa27ac22979dd4a2ad7a865b81701893 100644 (file)
@@ -17,6 +17,7 @@ int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
+void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
 int hugetlb_report_node_meminfo(int, char *);
index 2ad96c3f0e4e9dc06d0f9e199c3ec1387372b41b..81877ea39309aea73cef4a63a3212c69ab5c9131 100644 (file)
@@ -28,4 +28,31 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 int ioremap_page_range(unsigned long addr, unsigned long end,
                       unsigned long phys_addr, pgprot_t prot);
 
+/**
+ *     check_signature         -       find BIOS signatures
+ *     @io_addr: mmio address to check
+ *     @signature:  signature block
+ *     @length: length of signature
+ *
+ *     Perform a signature comparison with the mmio address io_addr. This
+ *     address should have been obtained by ioremap.
+ *     Returns 1 on a match.
+ */
+
+static inline int check_signature(const volatile void __iomem *io_addr,
+       const unsigned char *signature, int length)
+{
+       int retval = 0;
+       do {
+               if (readb(io_addr) != *signature)
+                       goto out;
+               io_addr++;
+               signature++;
+               length--;
+       } while (length);
+       retval = 1;
+out:
+       return retval;
+}
+
 #endif /* _LINUX_IO_H */
index c64f3cc7e870050bc3acc3b84c684293829b1b87..775f5a7da493ce78238fb852a2c6f03e1a777421 100644 (file)
@@ -141,6 +141,7 @@ struct irq_chip {
  * @pending_mask:      pending rebalanced interrupts
  * @dir:               /proc/irq/ procfs entry
  * @affinity_entry:    /proc/irq/smp_affinity procfs entry on SMP
+ * @name:              flow handler name for /proc/interrupts output
  *
  * Pad this out to 32 bytes for cache and indexing reasons.
  */
@@ -165,8 +166,9 @@ struct irq_desc {
        cpumask_t               pending_mask;
 #endif
 #ifdef CONFIG_PROC_FS
-       struct proc_dir_entry *dir;
+       struct proc_dir_entry   *dir;
 #endif
+       const char              *name;
 } ____cacheline_aligned;
 
 extern struct irq_desc irq_desc[NR_IRQS];
@@ -271,12 +273,6 @@ extern void fastcall handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void fastcall handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void fastcall handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 
-/*
- * Get a descriptive string for the highlevel handler, for
- * /proc/interrupts output:
- */
-extern const char *handle_irq_name(irq_flow_handler_t handle);
-
 /*
  * Monolithic do_IRQ implementation.
  * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
@@ -326,10 +322,12 @@ extern struct irq_chip no_irq_chip;
 extern struct irq_chip dummy_irq_chip;
 
 extern void
-set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
-                        irq_flow_handler_t handle);
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+                             irq_flow_handler_t handle, const char *name);
+
 extern void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained);
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+                 const char *name);
 
 /*
  * Set a highlevel flow handler for a given IRQ:
@@ -337,7 +335,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained);
 static inline void
 set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
 {
-       __set_irq_handler(irq, handle, 0);
+       __set_irq_handler(irq, handle, 0, NULL);
 }
 
 /*
@@ -349,7 +347,7 @@ static inline void
 set_irq_chained_handler(unsigned int irq,
                        irq_flow_handler_t handle)
 {
-       __set_irq_handler(irq, handle, 1);
+       __set_irq_handler(irq, handle, 1, NULL);
 }
 
 /* Handle dynamic irq creation and destruction */
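A minimal sketch of the renamed helper, with a hypothetical chip; the name string is what now appears next to the flow handler in /proc/interrupts:

	set_irq_chip_and_handler_name(irq, &my_chip, handle_edge_irq, "edge");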
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
new file mode 100644 (file)
index 0000000..ddb1287
--- /dev/null
@@ -0,0 +1,1107 @@
+/*
+ * linux/include/linux/jbd2.h
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>
+ *
+ * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Definitions for transaction data structures for the buffer cache
+ * filesystem journaling support.
+ */
+
+#ifndef _LINUX_JBD2_H
+#define _LINUX_JBD2_H
+
+/* Allow this file to be included directly into e2fsprogs */
+#ifndef __KERNEL__
+#include "jfs_compat.h"
+#define JBD2_DEBUG
+#define jfs_debug jbd_debug
+#else
+
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+#include <linux/journal-head.h>
+#include <linux/stddef.h>
+#include <linux/bit_spinlock.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+
+#include <asm/semaphore.h>
+#endif
+
+#define journal_oom_retry 1
+
+/*
+ * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
+ * certain classes of error which can occur due to failed IOs.  Under
+ * normal use we want ext3 to continue after such errors, because
+ * hardware _can_ fail, but for debugging purposes when running tests on
+ * known-good hardware we may want to trap these errors.
+ */
+#undef JBD_PARANOID_IOFAIL
+
+/*
+ * The default maximum commit age, in seconds.
+ */
+#define JBD_DEFAULT_MAX_COMMIT_AGE 5
+
+#ifdef CONFIG_JBD_DEBUG
+/*
+ * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
+ * consistency checks.  By default we don't do this unless
+ * CONFIG_JBD_DEBUG is on.
+ */
+#define JBD_EXPENSIVE_CHECKING
+extern int jbd2_journal_enable_debug;
+
+#define jbd_debug(n, f, a...)                                          \
+       do {                                                            \
+               if ((n) <= jbd2_journal_enable_debug) {                 \
+                       printk (KERN_DEBUG "(%s, %d): %s: ",            \
+                               __FILE__, __LINE__, __FUNCTION__);      \
+                       printk (f, ## a);                               \
+               }                                                       \
+       } while (0)
+#else
+#define jbd_debug(n, f, a...)  /**/
+#endif
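+/*
+ * Typical call (sketch): jbd_debug(2, "commit tid %u\n", tid);
+ * the first argument is the verbosity level compared against
+ * jbd2_journal_enable_debug in CONFIG_JBD_DEBUG builds.
+ */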
+
+extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
+extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
+extern void jbd2_slab_free(void *ptr, size_t size);
+
+#define jbd_kmalloc(size, flags) \
+       __jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
+#define jbd_rep_kmalloc(size, flags) \
+       __jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
+
+#define JBD2_MIN_JOURNAL_BLOCKS 1024
+
+#ifdef __KERNEL__
+
+/**
+ * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
+ *
+ * All filesystem modifications made by the process go
+ * through this handle.  Recursive operations (such as quota operations)
+ * are gathered into a single update.
+ *
+ * The buffer credits field is used to account for journaled buffers
+ * being modified by the running process.  To ensure that there is
+ * enough log space for all outstanding operations, we need to limit the
+ * number of outstanding buffers possible at any time.  When the
+ * operation completes, any buffer credits not used are credited back to
+ * the transaction, so that at all times we know how many buffers the
+ * outstanding updates on a transaction might possibly touch.
+ *
+ * This is an opaque datatype.
+ **/
+typedef struct handle_s                handle_t;       /* Atomic operation type */
+
+
+/**
+ * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
+ *
+ * journal_t is linked to from the fs superblock structure.
+ *
+ * We use the journal_t to keep track of all outstanding transaction
+ * activity on the filesystem, and to manage the state of the log
+ * writing process.
+ *
+ * This is an opaque datatype.
+ **/
+typedef struct journal_s       journal_t;      /* Journal control structure */
+#endif
+
+/*
+ * Internal structures used by the logging mechanism:
+ */
+
+#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
+
+/*
+ * On-disk structures
+ */
+
+/*
+ * Descriptor block types:
+ */
+
+#define JBD2_DESCRIPTOR_BLOCK  1
+#define JBD2_COMMIT_BLOCK      2
+#define JBD2_SUPERBLOCK_V1     3
+#define JBD2_SUPERBLOCK_V2     4
+#define JBD2_REVOKE_BLOCK      5
+
+/*
+ * Standard header for all descriptor blocks:
+ */
+typedef struct journal_header_s
+{
+       __be32          h_magic;
+       __be32          h_blocktype;
+       __be32          h_sequence;
+} journal_header_t;
+
+
+/*
+ * The block tag: used to describe a single buffer in the journal.
+ * t_blocknr_high is only used if INCOMPAT_64BIT is set, so this
+ * raw struct shouldn't be used for pointer math or sizeof() - use
+ * journal_tag_bytes(journal) instead to compute this.
+ */
+typedef struct journal_block_tag_s
+{
+       __be32          t_blocknr;      /* The on-disk block number */
+       __be32          t_flags;        /* See below */
+       __be32          t_blocknr_high; /* most-significant 32 bits of blocknr */
+} journal_block_tag_t;
+
+#define JBD_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
+#define JBD_TAG_SIZE64 (sizeof(journal_block_tag_t))
+
+/*
+ * The revoke descriptor: used on disk to describe a series of blocks to
+ * be revoked from the log
+ */
+typedef struct jbd2_journal_revoke_header_s
+{
+       journal_header_t r_header;
+       __be32           r_count;       /* Count of bytes used in the block */
+} jbd2_journal_revoke_header_t;
+
+
+/* Definitions for the journal tag flags word: */
+#define JBD2_FLAG_ESCAPE               1       /* on-disk block is escaped */
+#define JBD2_FLAG_SAME_UUID    2       /* block has same uuid as previous */
+#define JBD2_FLAG_DELETED      4       /* block deleted by this transaction */
+#define JBD2_FLAG_LAST_TAG     8       /* last tag in this descriptor block */
+
+
+/*
+ * The journal superblock.  All fields are in big-endian byte order.
+ */
+typedef struct journal_superblock_s
+{
+/* 0x0000 */
+       journal_header_t s_header;
+
+/* 0x000C */
+       /* Static information describing the journal */
+       __be32  s_blocksize;            /* journal device blocksize */
+       __be32  s_maxlen;               /* total blocks in journal file */
+       __be32  s_first;                /* first block of log information */
+
+/* 0x0018 */
+       /* Dynamic information describing the current state of the log */
+       __be32  s_sequence;             /* first commit ID expected in log */
+       __be32  s_start;                /* blocknr of start of log */
+
+/* 0x0020 */
+       /* Error value, as set by jbd2_journal_abort(). */
+       __be32  s_errno;
+
+/* 0x0024 */
+       /* Remaining fields are only valid in a version-2 superblock */
+       __be32  s_feature_compat;       /* compatible feature set */
+       __be32  s_feature_incompat;     /* incompatible feature set */
+       __be32  s_feature_ro_compat;    /* readonly-compatible feature set */
+/* 0x0030 */
+       __u8    s_uuid[16];             /* 128-bit uuid for journal */
+
+/* 0x0040 */
+       __be32  s_nr_users;             /* Nr of filesystems sharing log */
+
+       __be32  s_dynsuper;             /* Blocknr of dynamic superblock copy*/
+
+/* 0x0048 */
+       __be32  s_max_transaction;      /* Limit of journal blocks per trans.*/
+       __be32  s_max_trans_data;       /* Limit of data blocks per trans. */
+
+/* 0x0050 */
+       __u32   s_padding[44];
+
+/* 0x0100 */
+       __u8    s_users[16*48];         /* ids of all fs'es sharing the log */
+/* 0x0400 */
+} journal_superblock_t;
+
+#define JBD2_HAS_COMPAT_FEATURE(j,mask)                                        \
+       ((j)->j_format_version >= 2 &&                                  \
+        ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
+#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask)                             \
+       ((j)->j_format_version >= 2 &&                                  \
+        ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
+#define JBD2_HAS_INCOMPAT_FEATURE(j,mask)                              \
+       ((j)->j_format_version >= 2 &&                                  \
+        ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
+
+#define JBD2_FEATURE_INCOMPAT_REVOKE   0x00000001
+#define JBD2_FEATURE_INCOMPAT_64BIT    0x00000002
+
+/* Features known to this kernel version: */
+#define JBD2_KNOWN_COMPAT_FEATURES     0
+#define JBD2_KNOWN_ROCOMPAT_FEATURES   0
+#define JBD2_KNOWN_INCOMPAT_FEATURES   (JBD2_FEATURE_INCOMPAT_REVOKE | \
+                                        JBD2_FEATURE_INCOMPAT_64BIT)
+
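+/*
+ * Example (sketch): JBD2_HAS_INCOMPAT_FEATURE(journal,
+ * JBD2_FEATURE_INCOMPAT_64BIT) tells whether block tags include the
+ * t_blocknr_high field; journal_tag_bytes() sizes tags accordingly.
+ */
+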
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+
+#define JBD_ASSERTIONS
+#ifdef JBD_ASSERTIONS
+#define J_ASSERT(assert)                                               \
+do {                                                                   \
+       if (!(assert)) {                                                \
+               printk (KERN_EMERG                                      \
+                       "Assertion failure in %s() at %s:%d: \"%s\"\n", \
+                       __FUNCTION__, __FILE__, __LINE__, # assert);    \
+               BUG();                                                  \
+       }                                                               \
+} while (0)
+
+#if defined(CONFIG_BUFFER_DEBUG)
+void buffer_assertion_failure(struct buffer_head *bh);
+#define J_ASSERT_BH(bh, expr)                                          \
+       do {                                                            \
+               if (!(expr))                                            \
+                       buffer_assertion_failure(bh);                   \
+               J_ASSERT(expr);                                         \
+       } while (0)
+#define J_ASSERT_JH(jh, expr)  J_ASSERT_BH(jh2bh(jh), expr)
+#else
+#define J_ASSERT_BH(bh, expr)  J_ASSERT(expr)
+#define J_ASSERT_JH(jh, expr)  J_ASSERT(expr)
+#endif
+
+#else
+#define J_ASSERT(assert)       do { } while (0)
+#endif         /* JBD_ASSERTIONS */
+
+#if defined(JBD_PARANOID_IOFAIL)
+#define J_EXPECT(expr, why...)         J_ASSERT(expr)
+#define J_EXPECT_BH(bh, expr, why...)  J_ASSERT_BH(bh, expr)
+#define J_EXPECT_JH(jh, expr, why...)  J_ASSERT_JH(jh, expr)
+#else
+#define __journal_expect(expr, why...)                                      \
+       ({                                                                   \
+               int val = (expr);                                            \
+               if (!val) {                                                  \
+                       printk(KERN_ERR                                      \
+                               "EXT3-fs unexpected failure: %s;\n",# expr); \
+                       printk(KERN_ERR why "\n");                           \
+               }                                                            \
+               val;                                                         \
+       })
+#define J_EXPECT(expr, why...)         __journal_expect(expr, ## why)
+#define J_EXPECT_BH(bh, expr, why...)  __journal_expect(expr, ## why)
+#define J_EXPECT_JH(jh, expr, why...)  __journal_expect(expr, ## why)
+#endif
+
+enum jbd_state_bits {
+       BH_JBD                  /* Has an attached ext3 journal_head */
+         = BH_PrivateStart,
+       BH_JWrite,              /* Being written to log (@@@ DEBUGGING) */
+       BH_Freed,               /* Has been freed (truncated) */
+       BH_Revoked,             /* Has been revoked from the log */
+       BH_RevokeValid,         /* Revoked flag is valid */
+       BH_JBDDirty,            /* Is dirty but journaled */
+       BH_State,               /* Pins most journal_head state */
+       BH_JournalHead,         /* Pins bh->b_private and jh->b_bh */
+       BH_Unshadow,            /* Dummy bit, for BJ_Shadow wakeup filtering */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+       return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+       return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+       bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+       return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+       return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+       bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+       bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+       bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
+
+struct jbd2_revoke_table_s;
+
+/**
+ * struct handle_s - The handle_s type is the concrete type associated with
+ *     handle_t.
+ * @h_transaction: Which compound transaction is this update a part of?
+ * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
+ * @h_ref: Reference count on this handle
+ * @h_err: Field for caller's use to track errors through large fs operations
+ * @h_sync: flag for sync-on-close
+ * @h_jdata: flag to force data journaling
+ * @h_aborted: flag indicating fatal error on handle
+ **/
+
+/* Docbook can't yet cope with the bit fields, but we leave the
+ * documentation in so it can be fixed later.
+ */
+
+struct handle_s
+{
+       /* Which compound transaction is this update a part of? */
+       transaction_t           *h_transaction;
+
+       /* Number of remaining buffers we are allowed to dirty: */
+       int                     h_buffer_credits;
+
+       /* Reference count on this handle */
+       int                     h_ref;
+
+       /* Field for caller's use to track errors through large fs */
+       /* operations */
+       int                     h_err;
+
+       /* Flags [no locking] */
+       unsigned int    h_sync:         1;      /* sync-on-close */
+       unsigned int    h_jdata:        1;      /* force data journaling */
+       unsigned int    h_aborted:      1;      /* fatal error on handle */
+};
+
+
+/* The transaction_t type is the guts of the journaling mechanism.  It
+ * tracks a compound transaction through its various states:
+ *
+ * RUNNING:    accepting new updates
+ * LOCKED:     Updates still running but we don't accept new ones
+ * RUNDOWN:    Updates are tidying up but have finished requesting
+ *             new buffers to modify (state not used for now)
+ * FLUSH:       All updates complete, but we are still writing to disk
+ * COMMIT:      All data on disk, writing commit record
+ * FINISHED:   We still have to keep the transaction for checkpointing.
+ *
+ * The transaction keeps track of all of the buffers modified by a
+ * running transaction, and all of the buffers committed but not yet
+ * flushed to home for finished transactions.
+ */
+
+/*
+ * Lock ranking:
+ *
+ *    j_list_lock
+ *      ->jbd_lock_bh_journal_head()   (This is "innermost")
+ *
+ *    j_state_lock
+ *    ->jbd_lock_bh_state()
+ *
+ *    jbd_lock_bh_state()
+ *    ->j_list_lock
+ *
+ *    j_state_lock
+ *    ->t_handle_lock
+ *
+ *    j_state_lock
+ *    ->j_list_lock                    (journal_unmap_buffer)
+ *
+ */
+
+struct transaction_s
+{
+       /* Pointer to the journal for this transaction. [no locking] */
+       journal_t               *t_journal;
+
+       /* Sequence number for this transaction [no locking] */
+       tid_t                   t_tid;
+
+       /*
+        * Transaction's current state
+        * [no locking - only kjournald2 alters this]
+        * FIXME: needs barriers
+        * KLUDGE: [use j_state_lock]
+        */
+       enum {
+               T_RUNNING,
+               T_LOCKED,
+               T_RUNDOWN,
+               T_FLUSH,
+               T_COMMIT,
+               T_FINISHED
+       }                       t_state;
+
+       /*
+        * Where in the log does this transaction's commit start? [no locking]
+        */
+       unsigned long           t_log_start;
+
+       /* Number of buffers on the t_buffers list [j_list_lock] */
+       int                     t_nr_buffers;
+
+       /*
+        * Doubly-linked circular list of all buffers reserved but not yet
+        * modified by this transaction [j_list_lock]
+        */
+       struct journal_head     *t_reserved_list;
+
+       /*
+        * Doubly-linked circular list of all buffers under writeout during
+        * commit [j_list_lock]
+        */
+       struct journal_head     *t_locked_list;
+
+       /*
+        * Doubly-linked circular list of all metadata buffers owned by this
+        * transaction [j_list_lock]
+        */
+       struct journal_head     *t_buffers;
+
+       /*
+        * Doubly-linked circular list of all data buffers still to be
+        * flushed before this transaction can be committed [j_list_lock]
+        */
+       struct journal_head     *t_sync_datalist;
+
+       /*
+        * Doubly-linked circular list of all forget buffers (superseded
+        * buffers which we can un-checkpoint once this transaction commits)
+        * [j_list_lock]
+        */
+       struct journal_head     *t_forget;
+
+       /*
+        * Doubly-linked circular list of all buffers still to be flushed before
+        * this transaction can be checkpointed. [j_list_lock]
+        */
+       struct journal_head     *t_checkpoint_list;
+
+       /*
+        * Doubly-linked circular list of all buffers submitted for IO while
+        * checkpointing. [j_list_lock]
+        */
+       struct journal_head     *t_checkpoint_io_list;
+
+       /*
+        * Doubly-linked circular list of temporary buffers currently undergoing
+        * IO in the log [j_list_lock]
+        */
+       struct journal_head     *t_iobuf_list;
+
+       /*
+        * Doubly-linked circular list of metadata buffers being shadowed by log
+        * IO.  The IO buffers on the iobuf list and the shadow buffers on this
+        * list match each other one for one at all times. [j_list_lock]
+        */
+       struct journal_head     *t_shadow_list;
+
+       /*
+        * Doubly-linked circular list of control buffers being written to the
+        * log. [j_list_lock]
+        */
+       struct journal_head     *t_log_list;
+
+       /*
+        * Protects info related to handles
+        */
+       spinlock_t              t_handle_lock;
+
+       /*
+        * Number of outstanding updates running on this transaction
+        * [t_handle_lock]
+        */
+       int                     t_updates;
+
+       /*
+        * Number of buffers reserved for use by all handles in this
+        * transaction but not yet modified. [t_handle_lock]
+        */
+       int                     t_outstanding_credits;
+
+       /*
+        * Forward and backward links for the circular list of all transactions
+        * awaiting checkpoint. [j_list_lock]
+        */
+       transaction_t           *t_cpnext, *t_cpprev;
+
+       /*
+        * When will the transaction expire (become due for commit), in jiffies?
+        * [no locking]
+        */
+       unsigned long           t_expires;
+
+       /*
+        * How many handles used this transaction? [t_handle_lock]
+        */
+       int t_handle_count;
+
+};
+
+/**
+ * struct journal_s - The journal_s type is the concrete type associated with
+ *     journal_t.
+ * @j_flags:  General journaling state flags
+ * @j_errno:  Is there an outstanding uncleared error on the journal (from a
+ *     prior abort)?
+ * @j_sb_buffer: First part of superblock buffer
+ * @j_superblock: Second part of superblock buffer
+ * @j_format_version: Version of the superblock format
+ * @j_state_lock: Protect the various scalars in the journal
+ * @j_barrier_count:  Number of processes waiting to create a barrier lock
+ * @j_barrier: The barrier lock itself
+ * @j_running_transaction: The current running transaction.
+ * @j_committing_transaction: the transaction we are pushing to disk
+ * @j_checkpoint_transactions: a linked circular list of all transactions
+ *  waiting for checkpointing
+ * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
+ *  to start committing, or for a barrier lock to be released
+ * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
+ * @j_wait_done_commit: Wait queue for waiting for commit to complete
+ * @j_wait_checkpoint:  Wait queue to trigger checkpointing
+ * @j_wait_commit: Wait queue to trigger commit
+ * @j_wait_updates: Wait queue to wait for updates to complete
+ * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
+ * @j_head: Journal head - identifies the first unused block in the journal
+ * @j_tail: Journal tail - identifies the oldest still-used block in the
+ *  journal.
+ * @j_free: Journal free - how many free blocks are there in the journal?
+ * @j_first: The block number of the first usable block
+ * @j_last: The block number one beyond the last usable block
+ * @j_dev: Device where we store the journal
+ * @j_blocksize: blocksize for the location where we store the journal.
+ * @j_blk_offset: starting block offset into the device where we store the
+ *     journal
+ * @j_fs_dev: Device which holds the client fs.  For internal journal this will
+ *     be equal to j_dev
+ * @j_maxlen: Total maximum capacity of the journal region on disk.
+ * @j_list_lock: Protects the buffer lists and internal buffer state.
+ * @j_inode: Optional inode where we store the journal.  If present, all journal
+ *     block numbers are mapped into this inode via bmap().
+ * @j_tail_sequence:  Sequence number of the oldest transaction in the log
+ * @j_transaction_sequence: Sequence number of the next transaction to grant
+ * @j_commit_sequence: Sequence number of the most recently committed
+ *  transaction
+ * @j_commit_request: Sequence number of the most recent transaction wanting
+ *     commit
+ * @j_uuid: Uuid of client object.
+ * @j_task: Pointer to the current commit thread for this journal
+ * @j_max_transaction_buffers:  Maximum number of metadata buffers to allow in a
+ *     single compound commit transaction
+ * @j_commit_interval: What is the maximum transaction lifetime before we begin
+ *  a commit?
+ * @j_commit_timer:  The timer used to wakeup the commit thread
+ * @j_revoke_lock: Protect the revoke table
+ * @j_revoke: The revoke table - maintains the list of revoked blocks in the
+ *     current transaction.
+ * @j_revoke_table: alternate revoke tables for j_revoke
+ * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
+ * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
+ *     number that will fit in j_blocksize
+ * @j_last_sync_writer: most recent pid which did a synchronous write
+ * @j_private: An opaque pointer to fs-private information.
+ */
+
+struct journal_s
+{
+       /* General journaling state flags [j_state_lock] */
+       unsigned long           j_flags;
+
+       /*
+        * Is there an outstanding uncleared error on the journal (from a prior
+        * abort)? [j_state_lock]
+        */
+       int                     j_errno;
+
+       /* The superblock buffer */
+       struct buffer_head      *j_sb_buffer;
+       journal_superblock_t    *j_superblock;
+
+       /* Version of the superblock format */
+       int                     j_format_version;
+
+       /*
+        * Protect the various scalars in the journal
+        */
+       spinlock_t              j_state_lock;
+
+       /*
+        * Number of processes waiting to create a barrier lock [j_state_lock]
+        */
+       int                     j_barrier_count;
+
+       /* The barrier lock itself */
+       struct mutex            j_barrier;
+
+       /*
+        * Transactions: The current running transaction...
+        * [j_state_lock] [caller holding open handle]
+        */
+       transaction_t           *j_running_transaction;
+
+       /*
+        * the transaction we are pushing to disk
+        * [j_state_lock] [caller holding open handle]
+        */
+       transaction_t           *j_committing_transaction;
+
+       /*
+        * ... and a linked circular list of all transactions waiting for
+        * checkpointing. [j_list_lock]
+        */
+       transaction_t           *j_checkpoint_transactions;
+
+       /*
+        * Wait queue for waiting for a locked transaction to start committing,
+        * or for a barrier lock to be released
+        */
+       wait_queue_head_t       j_wait_transaction_locked;
+
+       /* Wait queue for waiting for checkpointing to complete */
+       wait_queue_head_t       j_wait_logspace;
+
+       /* Wait queue for waiting for commit to complete */
+       wait_queue_head_t       j_wait_done_commit;
+
+       /* Wait queue to trigger checkpointing */
+       wait_queue_head_t       j_wait_checkpoint;
+
+       /* Wait queue to trigger commit */
+       wait_queue_head_t       j_wait_commit;
+
+       /* Wait queue to wait for updates to complete */
+       wait_queue_head_t       j_wait_updates;
+
+       /* Mutex for locking against concurrent checkpoints */
+       struct mutex            j_checkpoint_mutex;
+
+       /*
+        * Journal head: identifies the first unused block in the journal.
+        * [j_state_lock]
+        */
+       unsigned long           j_head;
+
+       /*
+        * Journal tail: identifies the oldest still-used block in the journal.
+        * [j_state_lock]
+        */
+       unsigned long           j_tail;
+
+       /*
+        * Journal free: how many free blocks are there in the journal?
+        * [j_state_lock]
+        */
+       unsigned long           j_free;
+
+       /*
+        * Journal start and end: the block numbers of the first usable block
+        * and one beyond the last usable block in the journal. [j_state_lock]
+        */
+       unsigned long           j_first;
+       unsigned long           j_last;
+
+       /*
+        * Device, blocksize and starting block offset for the location where we
+        * store the journal.
+        */
+       struct block_device     *j_dev;
+       int                     j_blocksize;
+       unsigned long long              j_blk_offset;
+
+       /*
+        * Device which holds the client fs.  For internal journal this will be
+        * equal to j_dev.
+        */
+       struct block_device     *j_fs_dev;
+
+       /* Total maximum capacity of the journal region on disk. */
+       unsigned int            j_maxlen;
+
+       /*
+        * Protects the buffer lists and internal buffer state.
+        */
+       spinlock_t              j_list_lock;
+
+       /* Optional inode where we store the journal.  If present, all */
+       /* journal block numbers are mapped into this inode via */
+       /* bmap(). */
+       struct inode            *j_inode;
+
+       /*
+        * Sequence number of the oldest transaction in the log [j_state_lock]
+        */
+       tid_t                   j_tail_sequence;
+
+       /*
+        * Sequence number of the next transaction to grant [j_state_lock]
+        */
+       tid_t                   j_transaction_sequence;
+
+       /*
+        * Sequence number of the most recently committed transaction
+        * [j_state_lock].
+        */
+       tid_t                   j_commit_sequence;
+
+       /*
+        * Sequence number of the most recent transaction wanting commit
+        * [j_state_lock]
+        */
+       tid_t                   j_commit_request;
+
+       /*
+        * Journal uuid: identifies the object (filesystem, LVM volume etc)
+        * backed by this journal.  This will eventually be replaced by an array
+        * of uuids, allowing us to index multiple devices within a single
+        * journal and to perform atomic updates across them.
+        */
+       __u8                    j_uuid[16];
+
+       /* Pointer to the current commit thread for this journal */
+       struct task_struct      *j_task;
+
+       /*
+        * Maximum number of metadata buffers to allow in a single compound
+        * commit transaction
+        */
+       int                     j_max_transaction_buffers;
+
+       /*
+        * What is the maximum transaction lifetime before we begin a commit?
+        */
+       unsigned long           j_commit_interval;
+
+       /* The timer used to wakeup the commit thread: */
+       struct timer_list       j_commit_timer;
+
+       /*
+        * The revoke table: maintains the list of revoked blocks in the
+        * current transaction.  [j_revoke_lock]
+        */
+       spinlock_t              j_revoke_lock;
+       struct jbd2_revoke_table_s *j_revoke;
+       struct jbd2_revoke_table_s *j_revoke_table[2];
+
+       /*
+        * array of bhs for jbd2_journal_commit_transaction
+        */
+       struct buffer_head      **j_wbuf;
+       int                     j_wbufsize;
+
+       pid_t                   j_last_sync_writer;
+
+       /*
+        * An opaque pointer to fs-private information.  ext3 puts its
+        * superblock pointer here
+        */
+       void *j_private;
+};
+
+/*
+ * Journal flag definitions
+ */
+#define JBD2_UNMOUNT   0x001   /* Journal thread is being destroyed */
+#define JBD2_ABORT     0x002   /* Journaling has been aborted for errors. */
+#define JBD2_ACK_ERR   0x004   /* The errno in the sb has been acked */
+#define JBD2_FLUSHED   0x008   /* The journal superblock has been flushed */
+#define JBD2_LOADED    0x010   /* The journal superblock has been loaded */
+#define JBD2_BARRIER   0x020   /* Use IDE barriers */
+
+/*
+ * Function declarations for the journaling transaction and buffer
+ * management
+ */
+
+/* Filing buffers */
+extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
+extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_unfile_buffer(struct journal_head *);
+extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __journal_free_buffer(struct journal_head *bh);
+extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __journal_clean_data_list(transaction_t *transaction);
+
+/* Log buffer allocation */
+extern struct journal_head * jbd2_journal_get_descriptor_buffer(journal_t *);
+int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
+
+/* Commit management */
+extern void jbd2_journal_commit_transaction(journal_t *);
+
+/* Checkpoint list management */
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+
+/* Buffer IO */
+extern int
+jbd2_journal_write_metadata_buffer(transaction_t         *transaction,
+                             struct journal_head  *jh_in,
+                             struct journal_head **jh_out,
+                             unsigned long long   blocknr);
+
+/* Transaction locking */
+extern void            __wait_on_journal (journal_t *);
+
+/*
+ * Journal locking.
+ *
+ * We need to lock the journal during transaction state changes so that nobody
+ * ever tries to take a handle on the running transaction while we are in the
+ * middle of moving it to the commit phase.  j_state_lock does this.
+ *
+ * Note that the locking is completely interrupt unsafe.  We never touch
+ * journal structures from interrupts.
+ */
+
+static inline handle_t *journal_current_handle(void)
+{
+       return current->journal_info;
+}
+
+/* The journaling code user interface:
+ *
+ * Create and destroy handles
+ * Register buffer modifications against the current transaction.
+ */
+
+extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
+extern int      jbd2_journal_restart (handle_t *, int nblocks);
+extern int      jbd2_journal_extend (handle_t *, int nblocks);
+extern int      jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
+extern int      jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
+extern int      jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
+extern int      jbd2_journal_dirty_data (handle_t *, struct buffer_head *);
+extern int      jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
+extern void     jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
+extern int      jbd2_journal_forget (handle_t *, struct buffer_head *);
+extern void     journal_sync_buffer (struct buffer_head *);
+extern void     jbd2_journal_invalidatepage(journal_t *,
+                               struct page *, unsigned long);
+extern int      jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int      jbd2_journal_stop(handle_t *);
+extern int      jbd2_journal_flush (journal_t *);
+extern void     jbd2_journal_lock_updates (journal_t *);
+extern void     jbd2_journal_unlock_updates (journal_t *);
+
+extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
+                               struct block_device *fs_dev,
+                               unsigned long long start, int len, int bsize);
+extern journal_t * jbd2_journal_init_inode (struct inode *);
+extern int        jbd2_journal_update_format (journal_t *);
+extern int        jbd2_journal_check_used_features
+                  (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int        jbd2_journal_check_available_features
+                  (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int        jbd2_journal_set_features
+                  (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int        jbd2_journal_create     (journal_t *);
+extern int        jbd2_journal_load       (journal_t *journal);
+extern void       jbd2_journal_destroy    (journal_t *);
+extern int        jbd2_journal_recover    (journal_t *journal);
+extern int        jbd2_journal_wipe       (journal_t *, int);
+extern int        jbd2_journal_skip_recovery   (journal_t *);
+extern void       jbd2_journal_update_superblock       (journal_t *, int);
+extern void       __jbd2_journal_abort_hard    (journal_t *);
+extern void       jbd2_journal_abort      (journal_t *, int);
+extern int        jbd2_journal_errno      (journal_t *);
+extern void       jbd2_journal_ack_err    (journal_t *);
+extern int        jbd2_journal_clear_err  (journal_t *);
+extern int        jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
+extern int        jbd2_journal_force_commit(journal_t *);
+
+/*
+ * journal_head management
+ */
+struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
+struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
+void jbd2_journal_remove_journal_head(struct buffer_head *bh);
+void jbd2_journal_put_journal_head(struct journal_head *jh);
+
+/*
+ * handle management
+ */
+extern kmem_cache_t *jbd2_handle_cache;
+
+static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
+{
+       return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
+}
+
+static inline void jbd_free_handle(handle_t *handle)
+{
+       kmem_cache_free(jbd2_handle_cache, handle);
+}
+
+/* Primary revoke support */
+#define JOURNAL_REVOKE_DEFAULT_HASH 256
+extern int        jbd2_journal_init_revoke(journal_t *, int);
+extern void       jbd2_journal_destroy_revoke_caches(void);
+extern int        jbd2_journal_init_revoke_caches(void);
+
+extern void       jbd2_journal_destroy_revoke(journal_t *);
+extern int        jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
+extern int        jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void       jbd2_journal_write_revoke_records(journal_t *, transaction_t *);
+
+/* Recovery revoke support */
+extern int     jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
+extern int     jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t);
+extern void    jbd2_journal_clear_revoke(journal_t *);
+extern void    jbd2_journal_switch_revoke_table(journal_t *journal);
+
+/*
+ * The log thread user interface:
+ *
+ * Request space in the current transaction, and force transaction commit
+ * transitions on demand.
+ */
+
+int __jbd2_log_space_left(journal_t *); /* Called with journal locked */
+int jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+int jbd2_journal_force_commit_nested(journal_t *journal);
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_log_do_checkpoint(journal_t *journal);
+
+void __jbd2_log_wait_for_space(journal_t *journal);
+extern void    __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+extern int     jbd2_cleanup_journal_tail(journal_t *);
+
+/* Debugging code only: */
+
+#define jbd_ENOSYS() \
+do {                                                                      \
+       printk (KERN_ERR "JBD unimplemented function %s\n", __FUNCTION__); \
+       current->state = TASK_UNINTERRUPTIBLE;                             \
+       schedule();                                                        \
+} while (1)
+
+/*
+ * is_journal_aborted
+ *
+ * Simple test wrapper function to test the JBD2_ABORT state flag.  This
+ * bit, when set, indicates that we have had a fatal error somewhere,
+ * either inside the journaling layer or indicated to us by the client
+ * (eg. ext3), and that we should not commit any further
+ * transactions.
+ */
+
+static inline int is_journal_aborted(journal_t *journal)
+{
+       return journal->j_flags & JBD2_ABORT;
+}
+
+static inline int is_handle_aborted(handle_t *handle)
+{
+       if (handle->h_aborted)
+               return 1;
+       return is_journal_aborted(handle->h_transaction->t_journal);
+}
+
+static inline void jbd2_journal_abort_handle(handle_t *handle)
+{
+       handle->h_aborted = 1;
+}
+
+#endif /* __KERNEL__   */
+
+/* Comparison functions for transaction IDs: perform comparisons using
+ * modulo arithmetic so that they work over sequence number wraps. */
+
+static inline int tid_gt(tid_t x, tid_t y)
+{
+       int difference = (x - y);
+       return (difference > 0);
+}
+
+static inline int tid_geq(tid_t x, tid_t y)
+{
+       int difference = (x - y);
+       return (difference >= 0);
+}
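+
+/*
+ * Example (sketch): with 32-bit tids, tid_gt(5, 0xfffffffb) computes
+ * (int)(5 - 0xfffffffb) == 10 > 0, so a tid that wrapped past zero
+ * still compares as newer than one just below the wrap point.
+ */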
+
+extern int jbd2_journal_blocks_per_page(struct inode *inode);
+extern size_t journal_tag_bytes(journal_t *journal);
+
+/*
+ * Return the minimum number of blocks which must be free in the journal
+ * before a new transaction may be started.  Must be called under j_state_lock.
+ */
+static inline int jbd_space_needed(journal_t *journal)
+{
+       int nblocks = journal->j_max_transaction_buffers;
+       if (journal->j_committing_transaction)
+               nblocks += journal->j_committing_transaction->
+                                       t_outstanding_credits;
+       return nblocks;
+}
+
+/*
+ * Definitions which augment the buffer_head layer
+ */
+
+/* journaling buffer types */
+#define BJ_None                0       /* Not journaled */
+#define BJ_SyncData    1       /* Normal data: flush before commit */
+#define BJ_Metadata    2       /* Normal journaled metadata */
+#define BJ_Forget      3       /* Buffer superseded by this transaction */
+#define BJ_IO          4       /* Buffer is for temporary IO use */
+#define BJ_Shadow      5       /* Buffer contents being shadowed to the log */
+#define BJ_LogCtl      6       /* Buffer contains log descriptors */
+#define BJ_Reserved    7       /* Buffer is reserved for access by journal */
+#define BJ_Locked      8       /* Locked for I/O during commit */
+#define BJ_Types       9
+
+extern int jbd_blocks_per_page(struct inode *inode);
+
+#ifdef __KERNEL__
+
+#define buffer_trace_init(bh)  do {} while (0)
+#define print_buffer_fields(bh)        do {} while (0)
+#define print_buffer_trace(bh) do {} while (0)
+#define BUFFER_TRACE(bh, info) do {} while (0)
+#define BUFFER_TRACE2(bh, bh2, info)   do {} while (0)
+#define JBUFFER_TRACE(jh, info)        do {} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_JBD2_H */
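A minimal sketch of the handle interface declared above, assuming a journal_t set up with jbd2_journal_init_inode(), a buffer_head the caller already references, and a hypothetical credit count of 8; error handling is elided:

	handle_t *handle = jbd2_journal_start(journal, 8);

	if (!IS_ERR(handle)) {
		jbd2_journal_get_write_access(handle, bh);
		/* ... modify the buffer contents ... */
		jbd2_journal_dirty_metadata(handle, bh);
		jbd2_journal_stop(handle);
	}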
index 81e3a185f9515ada07ef16bef6b94a619e9196f6..aa50d89eacd77abb748ad7a892f20575b7c4b1bb 100644 (file)
 #define LINUX_LOCKD_BIND_H
 
 #include <linux/lockd/nlm.h>
+/* need xdr-encoded error codes too, so... */
+#include <linux/lockd/xdr.h>
+#ifdef CONFIG_LOCKD_V4
+#include <linux/lockd/xdr4.h>
+#endif
 
 /* Dummy declarations */
 struct svc_rqst;
index bb0a0f1caa91e9bc521686fe6dfed54d7b85c17a..66fdae3b490cca8eaa5ca96f7f9ab9e9eb43849e 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/nfs.h>
 #include <linux/sunrpc/xdr.h>
 
+struct svc_rqst;
+
 #define NLM_MAXCOOKIELEN       32
 #define NLM_MAXSTRLEN          1024
 
@@ -22,6 +24,8 @@
 #define        nlm_lck_blocked         __constant_htonl(NLM_LCK_BLOCKED)
 #define        nlm_lck_denied_grace_period     __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD)
 
+#define nlm_drop_reply         __constant_htonl(30000)
+
 /* Lock info passed via NLM */
 struct nlm_lock {
        char *                  caller;
index 22036dd2ba362dee7b4b48e90416f65b9f2871aa..156c40fc664e26420f07cd6985efbc8dfa62113c 100644 (file)
@@ -8,6 +8,7 @@
 #define EFS_SUPER_MAGIC                0x414A53
 #define EXT2_SUPER_MAGIC       0xEF53
 #define EXT3_SUPER_MAGIC       0xEF53
+#define EXT4_SUPER_MAGIC       0xEF53
 #define HPFS_SUPER_MAGIC       0xf995e849
 #define ISOFS_SUPER_MAGIC      0x9660
 #define JFFS2_SUPER_MAGIC      0x72b6
index 26146623be2f442949599451376e77b961d8d119..5a6068ff5556f5fcd62566988133aa25d7591de0 100644 (file)
@@ -1103,12 +1103,7 @@ static inline void vm_stat_account(struct mm_struct *mm,
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
-kernel_map_pages(struct page *page, int numpages, int enable)
-{
-       if (!PageHighMem(page) && !enable)
-               debug_check_no_locks_freed(page_address(page),
-                                          numpages * PAGE_SIZE);
-}
+kernel_map_pages(struct page *page, int numpages, int enable) {}
 #endif
 
 extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
index 4b2d8091a4104a0cd603d64a9240d6dbdf9b5396..d1d00ce8f4ed5c53ecd09b8d6f38a0ddcb503089 100644 (file)
@@ -317,9 +317,6 @@ struct module
        /* Am I unsafe to unload? */
        int unsafe;
 
-       /* Am I GPL-compatible */
-       int license_gplok;
-
        unsigned int taints;    /* same bits as kernel:tainted */
 
 #ifdef CONFIG_MODULE_UNLOAD
index e712e7d47cc22a6c0f6e3071bbabbf6770506b16..d6b6dc09ad972d9f844da2e64d765d254ee8f87f 100644 (file)
@@ -15,6 +15,8 @@
 #ifndef LINUX_NBD_H
 #define LINUX_NBD_H
 
+#include <linux/types.h>
+
 #define NBD_SET_SOCK   _IO( 0xab, 0 )
 #define NBD_SET_BLKSIZE        _IO( 0xab, 1 )
 #define NBD_SET_SIZE   _IO( 0xab, 2 )
index c257f716e00f0b1698fb4154d67f6792e3b6fd50..15c733b816f0820afcef4d615ed11cfa7b43cd76 100644 (file)
@@ -19,6 +19,7 @@
 #define _LINUX_NET_H
 
 #include <linux/wait.h>
+#include <linux/random.h>
 #include <asm/socket.h>
 
 struct poll_table_struct;
@@ -193,9 +194,9 @@ extern int       sock_map_fd(struct socket *sock);
 extern struct socket *sockfd_lookup(int fd, int *err);
 #define                     sockfd_put(sock) fput(sock->file)
 extern int          net_ratelimit(void);
-extern unsigned long net_random(void);
-extern void         net_srandom(unsigned long);
-extern void         net_random_init(void);
+
+#define net_random()           random32()
+#define net_srandom(seed)      srandom32(seed)
 
 extern int          kernel_sendmsg(struct socket *sock, struct msghdr *msg,
                                    struct kvec *vec, size_t num, size_t len);
index 5dce5c21822ca12bc6b3c6423352b50fcbe75b44..b1063e9cdb1b77c35f3b8d2ec72656138ff93b5a 100644 (file)
@@ -8,8 +8,8 @@
  * See detailed comments in the file linux/bitmap.h describing the
  * data type on which these nodemasks are based.
  *
- * For details of nodemask_scnprintf() and nodemask_parse(),
- * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
+ * For details of nodemask_scnprintf() and nodemask_parse_user(),
+ * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
  * For details of nodelist_scnprintf() and nodelist_parse(), see
  * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
  * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
@@ -51,7 +51,7 @@
  * unsigned long *nodes_addr(mask)     Array of unsigned long's in mask
  *
  * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing
- * int nodemask_parse(ubuf, ulen, mask)        Parse ascii string as nodemask
+ * int nodemask_parse_user(ubuf, ulen, mask)   Parse ascii string as nodemask
  * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
  * int nodelist_parse(buf, map)                Parse ascii string as nodelist
  * int node_remap(oldbit, old, new)    newbit = map(old, new)(oldbit)
@@ -288,12 +288,12 @@ static inline int __nodemask_scnprintf(char *buf, int len,
        return bitmap_scnprintf(buf, len, srcp->bits, nbits);
 }
 
-#define nodemask_parse(ubuf, ulen, dst) \
-                       __nodemask_parse((ubuf), (ulen), &(dst), MAX_NUMNODES)
-static inline int __nodemask_parse(const char __user *buf, int len,
+#define nodemask_parse_user(ubuf, ulen, dst) \
+               __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
+static inline int __nodemask_parse_user(const char __user *buf, int len,
                                        nodemask_t *dstp, int nbits)
 {
-       return bitmap_parse(buf, len, dstp->bits, nbits);
+       return bitmap_parse_user(buf, len, dstp->bits, nbits);
 }
 
 #define nodelist_scnprintf(buf, len, src) \
index 5d6456bcdebac8e4099809369407583bf04b8d69..0248b30e306d3a5b747201f008a550c46cc4aa0e 100644 (file)
@@ -69,6 +69,9 @@ extern struct file_operations random_fops, urandom_fops;
 unsigned int get_random_int(void);
 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
 
+u32 random32(void);
+void srandom32(u32 seed);
+
 #endif /* __KERNEL___ */
 
 #endif /* _LINUX_RANDOM_H */
index 9b5fea81f55e4e96dfdca7fb83a3ecd0834c2137..b200b9856f32f2f585f6953994bd4ec54a524e9e 100644 (file)
@@ -882,7 +882,8 @@ struct request_sock;
  *     Check permission when a flow selects a xfrm_policy for processing
  *     XFRMs on a packet.  The hook is called when selecting either a
  *     per-socket policy or a generic xfrm policy.
- *     Return 0 if permission is granted.
+ *     Return 0 if permission is granted, -ESRCH otherwise, or -errno
+ *     on other errors.
  * @xfrm_state_pol_flow_match:
  *     @x contains the state to match.
  *     @xp contains the policy to check for a match.
@@ -891,6 +892,7 @@ struct request_sock;
  * @xfrm_flow_state_match:
  *     @fl contains the flow key to match.
  *     @xfrm points to the xfrm_state to match.
+ *     @xp points to the xfrm_policy to match.
  *     Return 1 if there is a match.
  * @xfrm_decode_session:
  *     @skb points to skb to decode.
@@ -1388,7 +1390,8 @@ struct security_operations {
        int (*xfrm_policy_lookup)(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
        int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
                        struct xfrm_policy *xp, struct flowi *fl);
-       int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm);
+       int (*xfrm_flow_state_match)(struct flowi *fl, struct xfrm_state *xfrm,
+                       struct xfrm_policy *xp);
        int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
 #endif /* CONFIG_SECURITY_NETWORK_XFRM */
 
@@ -3120,11 +3123,6 @@ static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm
        return security_ops->xfrm_policy_alloc_security(xp, sec_ctx, NULL);
 }
 
-static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk)
-{
-       return security_ops->xfrm_policy_alloc_security(xp, NULL, sk);
-}
-
 static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new)
 {
        return security_ops->xfrm_policy_clone_security(old, new);
@@ -3175,9 +3173,10 @@ static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
        return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
 }
 
-static inline int security_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm)
+static inline int security_xfrm_flow_state_match(struct flowi *fl,
+                       struct xfrm_state *xfrm, struct xfrm_policy *xp)
 {
-       return security_ops->xfrm_flow_state_match(fl, xfrm);
+       return security_ops->xfrm_flow_state_match(fl, xfrm, xp);
 }
 
 static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
@@ -3197,11 +3196,6 @@ static inline int security_xfrm_policy_alloc(struct xfrm_policy *xp, struct xfrm
        return 0;
 }
 
-static inline int security_xfrm_sock_policy_alloc(struct xfrm_policy *xp, struct sock *sk)
-{
-       return 0;
-}
-
 static inline int security_xfrm_policy_clone(struct xfrm_policy *old, struct xfrm_policy *new)
 {
        return 0;
@@ -3249,7 +3243,7 @@ static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
 }
 
 static inline int security_xfrm_flow_state_match(struct flowi *fl,
-                                struct xfrm_state *xfrm)
+                       struct xfrm_state *xfrm, struct xfrm_policy *xp)
 {
        return 1;
 }
index 1e65f2dd80e5d5aa4bbd8f5a7885862cccfe5c4f..606cb21652322d33ed1d389e44b6c304d4fb6b8f 100644 (file)
@@ -56,7 +56,9 @@ enum rpc_accept_stat {
        RPC_PROG_MISMATCH = 2,
        RPC_PROC_UNAVAIL = 3,
        RPC_GARBAGE_ARGS = 4,
-       RPC_SYSTEM_ERR = 5
+       RPC_SYSTEM_ERR = 5,
+       /* internal use only */
+       RPC_DROP_REPLY = 60000,
 };
 
 enum rpc_reject_stat {
index 953723b09bc6d73cd5ad97bd0143bdf1fbd162f9..ac69e55116060be1d29fb783a812d861011ac548 100644 (file)
@@ -74,6 +74,7 @@ struct xdr_buf {
 #define        rpc_proc_unavail        __constant_htonl(RPC_PROC_UNAVAIL)
 #define        rpc_garbage_args        __constant_htonl(RPC_GARBAGE_ARGS)
 #define        rpc_system_err          __constant_htonl(RPC_SYSTEM_ERR)
+#define        rpc_drop_reply          __constant_htonl(RPC_DROP_REPLY)
 
 #define        rpc_auth_ok             __constant_htonl(RPC_AUTH_OK)
 #define        rpc_autherr_badcred     __constant_htonl(RPC_AUTH_BADCRED)
index b0ace3fd7eb9f0f8c8a7d14f85a48705e2595930..1912c6cbef553cd1d05795c223df8f8e4425af16 100644 (file)
@@ -431,6 +431,10 @@ asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
                                struct epoll_event __user *event);
 asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
                                int maxevents, int timeout);
+asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
+                               int maxevents, int timeout,
+                               const sigset_t __user *sigmask,
+                               size_t sigsetsize);
 asmlinkage long sys_gethostname(char __user *name, int len);
 asmlinkage long sys_sethostname(char __user *name, int len);
 asmlinkage long sys_setdomainname(char __user *name, int len);
index c5fdf62595483430ea42dec5d483a6503e11c312..df5c4654360d029ba95f222d22b3a74042556944 100644 (file)
@@ -243,7 +243,7 @@ struct v4l2_pix_format
 #define V4L2_PIX_FMT_YUV420  v4l2_fourcc('Y','U','1','2') /* 12  YUV 4:2:0     */
 #define V4L2_PIX_FMT_YYUV    v4l2_fourcc('Y','Y','U','V') /* 16  YUV 4:2:2     */
 #define V4L2_PIX_FMT_HI240   v4l2_fourcc('H','I','2','4') /*  8  8-bit color   */
-#define V4L2_PIX_FMT_HM12    v4l2_fourcc('H','M','1','2') /*  8  YUV 4:1:1 16x16 macroblocks */
+#define V4L2_PIX_FMT_HM12    v4l2_fourcc('H','M','1','2') /*  8  YUV 4:2:0 16x16 macroblocks */
 
 /* see http://www.siliconimaging.com/RGB%20Bayer.htm */
 #define V4L2_PIX_FMT_SBGGR8  v4l2_fourcc('B','A','8','1') /*  8  BGBG.. GRGR.. */
index df22efcfcc0b7deceb77ee1094a05d24f00c22d2..c0fc39620f3643c4393ba2b1d99c8181498b0da0 100644 (file)
@@ -153,6 +153,7 @@ struct hci_conn {
        __u8             mode;
        __u8             type;
        __u8             out;
+       __u8             attempt;
        __u8             dev_class[3];
        __u8             features[8];
        __u16            interval;
@@ -289,6 +290,22 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
        return NULL;
 }
 
+static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
+                                       __u8 type, __u16 state)
+{
+       struct hci_conn_hash *h = &hdev->conn_hash;
+       struct list_head *p;
+       struct hci_conn  *c;
+
+       list_for_each(p, &h->list) {
+               c = list_entry(p, struct hci_conn, list);
+               if (c->type == type && c->state == state)
+                       return c;
+       }
+       return NULL;
+}
+
+void hci_acl_connect(struct hci_conn *conn);
 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
 void hci_add_sco(struct hci_conn *conn, __u16 handle);
 
index ddf5f3ca1720154be67aca197f628f1e7a83fd08..3b44d72b27d3ca25126f7f595dd787e6935e8b8b 100644 (file)
@@ -97,7 +97,7 @@ struct flowi {
 #define FLOW_DIR_FWD   2
 
 struct sock;
-typedef void (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir,
+typedef int (*flow_resolve_t)(struct flowi *key, u16 family, u8 dir,
                               void **objp, atomic_t **obj_refp);
 
 extern void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
index 6d14c22a00c5e3e9ab750fb0c68434edba86c853..5f48748fe017557e0ec61f1d0a5bce47800387c8 100644 (file)
@@ -196,6 +196,7 @@ static inline void inet_twsk_put(struct inet_timewait_sock *tw)
 {
        if (atomic_dec_and_test(&tw->tw_refcnt)) {
                struct module *owner = tw->tw_prot->owner;
+               twsk_destructor((struct sock *)tw);
 #ifdef SOCK_REFCNT_DEBUG
                printk(KERN_DEBUG "%s timewait_sock %p released\n",
                       tw->tw_prot->name, tw);
index 925573fd2aed04a91e41c0de1b84bf09f68e06c5..f13cc0c2b163f76eeb45cfabc4d5a7591f854635 100644 (file)
@@ -19,7 +19,7 @@ struct inet_peer
 {
        struct inet_peer        *avl_left, *avl_right;
        struct inet_peer        *unused_next, **unused_prevp;
-       unsigned long           dtime;          /* the time of last use of not
+       __u32                   dtime;          /* the time of last use of not
                                                 * referenced entries */
        atomic_t                refcnt;
        __be32                  v4daddr;        /* peer's address */
@@ -35,21 +35,8 @@ void                 inet_initpeers(void) __init;
 /* can be called with or without local BH being disabled */
 struct inet_peer       *inet_getpeer(__be32 daddr, int create);
 
-extern spinlock_t inet_peer_unused_lock;
-extern struct inet_peer **inet_peer_unused_tailp;
 /* can be called from BH context or outside */
-static inline void     inet_putpeer(struct inet_peer *p)
-{
-       spin_lock_bh(&inet_peer_unused_lock);
-       if (atomic_dec_and_test(&p->refcnt)) {
-               p->unused_prevp = inet_peer_unused_tailp;
-               p->unused_next = NULL;
-               *inet_peer_unused_tailp = p;
-               inet_peer_unused_tailp = &p->unused_next;
-               p->dtime = jiffies;
-       }
-       spin_unlock_bh(&inet_peer_unused_lock);
-}
+extern void inet_putpeer(struct inet_peer *p);
 
 extern spinlock_t inet_peer_idlock;
 /* can be called with or without local BH being disabled */
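
Moving inet_putpeer() out of line lets the unused-list plumbing (inet_peer_unused_lock and the tail pointer) become private to net/ipv4/inetpeer.c. With dtime shrunk to __u32, the out-of-line body presumably stores a truncated jiffies value, along these lines (a sketch; the real body lives in inetpeer.c, not in this hunk):

	void inet_putpeer(struct inet_peer *p)
	{
		spin_lock_bh(&inet_peer_unused_lock);
		if (atomic_dec_and_test(&p->refcnt)) {
			p->unused_prevp = inet_peer_unused_tailp;
			p->unused_next = NULL;
			*inet_peer_unused_tailp = p;
			inet_peer_unused_tailp = &p->unused_next;
			p->dtime = (__u32)jiffies;
		}
		spin_unlock_bh(&inet_peer_unused_lock);
	}
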
index c63a58058e2170b811e01c446b8781b5f1faa93e..12c214b9eadf8b8d7da0f7c0eb12c08512193049 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <net/netlink.h>
+#include <asm/atomic.h>
 
 /*
  * NetLabel - A management interface for maintaining network packet label
@@ -106,6 +107,7 @@ int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
 
 /* LSM security attributes */
 struct netlbl_lsm_cache {
+       atomic_t refcount;
        void (*free) (const void *data);
        void *data;
 };
@@ -117,7 +119,7 @@ struct netlbl_lsm_secattr {
        unsigned char *mls_cat;
        size_t mls_cat_len;
 
-       struct netlbl_lsm_cache cache;
+       struct netlbl_lsm_cache *cache;
 };
 
 /*
@@ -125,6 +127,43 @@ struct netlbl_lsm_secattr {
  */
 
 
+/**
+ * netlbl_secattr_cache_alloc - Allocate and initialize a secattr cache
+ * @flags: the memory allocation flags
+ *
+ * Description:
+ * Allocate and initialize a netlbl_lsm_cache structure.  Returns a pointer
+ * on success, NULL on failure.
+ *
+ */
+static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
+{
+       struct netlbl_lsm_cache *cache;
+
+       cache = kzalloc(sizeof(*cache), flags);
+       if (cache)
+               atomic_set(&cache->refcount, 1);
+       return cache;
+}
+
+/**
+ * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct
+ * @cache: the struct to free
+ *
+ * Description:
+ * Drops a reference to @cache; when the last reference is gone the free()
+ * callback is invoked on the private data and the struct itself is freed.
+ *
+ */
+static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
+{
+       if (!atomic_dec_and_test(&cache->refcount))
+               return;
+
+       if (cache->free)
+               cache->free(cache->data);
+       kfree(cache);
+}
+
 /**
  * netlbl_secattr_init - Initialize a netlbl_lsm_secattr struct
  * @secattr: the struct to initialize
@@ -143,20 +182,16 @@ static inline int netlbl_secattr_init(struct netlbl_lsm_secattr *secattr)
 /**
  * netlbl_secattr_destroy - Clears a netlbl_lsm_secattr struct
  * @secattr: the struct to clear
- * @clear_cache: cache clear flag
  *
  * Description:
  * Destroys the @secattr struct, including freeing all of the internal buffers.
- * If @clear_cache is true then free the cache fields, otherwise leave them
- * intact.  The struct must be reset with a call to netlbl_secattr_init()
- * before reuse.
+ * The struct must be reset with a call to netlbl_secattr_init() before reuse.
  *
  */
-static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr,
-                                         u32 clear_cache)
+static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr)
 {
-       if (clear_cache && secattr->cache.data != NULL && secattr->cache.free)
-               secattr->cache.free(secattr->cache.data);
+       if (secattr->cache)
+               netlbl_secattr_cache_free(secattr->cache);
        kfree(secattr->domain);
        kfree(secattr->mls_cat);
 }
@@ -178,17 +213,14 @@ static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(int flags)
 /**
  * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct
  * @secattr: the struct to free
- * @clear_cache: cache clear flag
  *
  * Description:
- * Frees @secattr including all of the internal buffers.  If @clear_cache is
- * true then free the cache fields, otherwise leave them intact.
+ * Frees @secattr including all of the internal buffers.
  *
  */
-static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr,
-                                      u32 clear_cache)
+static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr)
 {
-       netlbl_secattr_destroy(secattr, clear_cache);
+       netlbl_secattr_destroy(secattr);
        kfree(secattr);
 }
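
With the cache held by pointer and reference counted, one cache entry can now be shared across secattr structs: allocate it once, take an extra reference per additional owner, and let netlbl_secattr_cache_free() drop references. A hedged sketch (my_free_fn and my_data are placeholders, not NetLabel API):

	struct netlbl_lsm_cache *cache;

	cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
	if (cache == NULL)
		return -ENOMEM;
	cache->free = my_free_fn;	/* placeholder LSM callback */
	cache->data = my_data;		/* placeholder private data */

	secattr_a->cache = cache;		/* consumes the initial ref */
	atomic_inc(&cache->refcount);		/* one more for a second owner */
	secattr_b->cache = cache;

	netlbl_secattr_destroy(secattr_a);	/* drops one reference */
	netlbl_secattr_destroy(secattr_b);	/* last ref: free() runs */
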
 
index ee68a3124076554d08d3dcc45cf96fc801834253..764e3af5be9340c04b303b82416d29c6bbbc5e8e 100644 (file)
@@ -139,6 +139,7 @@ int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
 unsigned int sctp_poll(struct file *file, struct socket *sock,
                poll_table *wait);
+void sctp_sock_rfree(struct sk_buff *skb);
 
 /*
  * sctp/primitive.c
@@ -444,6 +445,19 @@ static inline struct list_head *sctp_list_dequeue(struct list_head *list)
        return result;
 }
 
+/* SCTP version of skb_set_owner_r.  We need this one because
+ * of the way we have to do receive buffer accounting on bundled
+ * chunks.
+ */
+static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+       struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+       skb->sk = sk;
+       skb->destructor = sctp_sock_rfree;
+       atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
+}
+
 /* Tests if the list has one and only one entry. */
 static inline int sctp_list_single_entry(struct list_head *head)
 {
index 6c40cfc4832d4a8beb136b8944fb05a7fad9cd68..1a4ddc1ec7d24cfd9a6b5acfe090c430a40a1502 100644 (file)
@@ -63,6 +63,7 @@ struct sctp_ulpevent {
        __u32 cumtsn;
        int msg_flags;
        int iif;
+       unsigned int rmem_len;
 };
 
 /* Retrieve the skb this event sits inside of. */
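
The new rmem_len field records exactly how much was charged to sk_rmem_alloc for this event, so the skb destructor can uncharge the same amount even when several bundled chunks share one skb. The matching destructor, declared earlier as sctp_sock_rfree() and defined elsewhere in sctp, is presumably the mirror image of sctp_skb_set_owner_r():

	/* Assumed counterpart to sctp_skb_set_owner_r(): subtract the
	 * amount recorded at charge time rather than skb->truesize. */
	void sctp_sock_rfree(struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;
		struct sctp_ulpevent *event = sctp_skb2event(skb);

		atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
	}
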
index 2544281e1d5e60f3e8b60ff1006327135c452576..be293d795e385396cf2e3e7f7761e1e1efd223c7 100644 (file)
@@ -19,6 +19,7 @@ struct timewait_sock_ops {
        unsigned int    twsk_obj_size;
        int             (*twsk_unique)(struct sock *sk,
                                       struct sock *sktw, void *twp);
+       void            (*twsk_destructor)(struct sock *sk);
 };
 
 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -28,4 +29,10 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
        return 0;
 }
 
+static inline void twsk_destructor(struct sock *sk)
+{
+       if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
+               sk->sk_prot->twsk_prot->twsk_destructor(sk);
+}
+
 #endif /* _TIMEWAIT_SOCK_H */
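
The hook is optional: twsk_destructor() quietly does nothing when a protocol leaves the field NULL. A protocol opting in would look roughly like this (all names hypothetical):

	static void myproto_twsk_destructor(struct sock *sk)
	{
		/* release per-timewait-socket state here */
	}

	static struct timewait_sock_ops myproto_timewait_sock_ops = {
		.twsk_obj_size		= sizeof(struct myproto_timewait_sock),
		.twsk_destructor	= myproto_twsk_destructor,
	};
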
index 1e2a4ddec96e3c7cfdf8e5ef2010d234c0673821..737fdb2ee8a45bb230c87e3303d52f098d74f53a 100644 (file)
@@ -995,7 +995,8 @@ struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
                                  int create, unsigned short family);
 extern void xfrm_policy_flush(u8 type);
 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
-extern int xfrm_bundle_ok(struct xfrm_dst *xdst, struct flowi *fl, int family, int strict);
+extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
+                         struct flowi *fl, int family, int strict);
 extern void xfrm_init_pmtu(struct dst_entry *dst);
 
 extern wait_queue_head_t km_waitq;
index 32c96628463eb46bab59ff81928d44e573711170..27dd3ee47099dd49c26790adf40b42efa1b950a3 100644 (file)
@@ -19,7 +19,7 @@
 static DEFINE_MUTEX(cpu_add_remove_lock);
 static DEFINE_MUTEX(cpu_bitmask_lock);
 
-static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
@@ -68,7 +68,11 @@ EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 /* Need to know about CPUs going up/down? */
 int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 {
-       return blocking_notifier_chain_register(&cpu_chain, nb);
+       int ret;
+       mutex_lock(&cpu_add_remove_lock);
+       ret = raw_notifier_chain_register(&cpu_chain, nb);
+       mutex_unlock(&cpu_add_remove_lock);
+       return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -77,7 +81,9 @@ EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-       blocking_notifier_chain_unregister(&cpu_chain, nb);
+       mutex_lock(&cpu_add_remove_lock);
+       raw_notifier_chain_unregister(&cpu_chain, nb);
+       mutex_unlock(&cpu_add_remove_lock);
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -126,7 +132,7 @@ static int _cpu_down(unsigned int cpu)
        if (!cpu_online(cpu))
                return -EINVAL;
 
-       err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+       err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                                (void *)(long)cpu);
        if (err == NOTIFY_BAD) {
                printk("%s: attempt to take down CPU %u failed\n",
@@ -146,7 +152,7 @@ static int _cpu_down(unsigned int cpu)
 
        if (IS_ERR(p)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
-               if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+               if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
                                (void *)(long)cpu) == NOTIFY_BAD)
                        BUG();
 
@@ -169,7 +175,7 @@ static int _cpu_down(unsigned int cpu)
        put_cpu();
 
        /* CPU is completely dead: tell everyone.  Too late to complain. */
-       if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
+       if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD,
                        (void *)(long)cpu) == NOTIFY_BAD)
                BUG();
 
@@ -206,7 +212,7 @@ static int __devinit _cpu_up(unsigned int cpu)
        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;
 
-       ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+       ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
        if (ret == NOTIFY_BAD) {
                printk("%s: attempt to bring up CPU %u failed\n",
                                __FUNCTION__, cpu);
@@ -223,11 +229,11 @@ static int __devinit _cpu_up(unsigned int cpu)
        BUG_ON(!cpu_online(cpu));
 
        /* Now call notifier in preparation. */
-       blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+       raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
 
 out_notify:
        if (ret != 0)
-               blocking_notifier_call_chain(&cpu_chain,
+               raw_notifier_call_chain(&cpu_chain,
                                CPU_UP_CANCELED, hcpu);
 
        return ret;
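
The chain is now a raw notifier: it provides no locking of its own, so registration and every call site are serialized externally by cpu_add_remove_lock, avoiding a blocking notifier's rwsem in the hotplug paths. Clients are unaffected; registering still looks like this (the callback body is a sketch):

	static int my_cpu_callback(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_ONLINE:
			/* set up per-CPU state for 'cpu' */
			break;
		case CPU_DEAD:
			/* tear it down */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_nb = {
		.notifier_call = my_cpu_callback,
	};

	/* somewhere in init code: */
	register_cpu_notifier(&my_cpu_nb);
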
index 7dc6140baac69325f1ce2fa08fa9bd858aa57258..29ebb30850eda41f743cdacb6d7bb0f696872a87 100644 (file)
@@ -984,6 +984,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (!p)
                goto fork_out;
 
+       rt_mutex_init_task(p);
+
 #ifdef CONFIG_TRACE_IRQFLAGS
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
@@ -1088,8 +1090,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->lockdep_recursion = 0;
 #endif
 
-       rt_mutex_init_task(p);
-
 #ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
 #endif
index 11c99697acfe9149ff4ebc9adde4b78caa7c9ca5..2d0dc3efe8137452f2ebfa194e84ff157573e66d 100644 (file)
@@ -499,7 +499,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 #endif /* CONFIG_SMP */
 
 void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained)
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+                 const char *name)
 {
        struct irq_desc *desc;
        unsigned long flags;
@@ -540,6 +541,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained)
                desc->depth = 1;
        }
        desc->handle_irq = handle;
+       desc->name = name;
 
        if (handle != handle_bad_irq && is_chained) {
                desc->status &= ~IRQ_DISABLED;
@@ -555,30 +557,13 @@ set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
                         irq_flow_handler_t handle)
 {
        set_irq_chip(irq, chip);
-       __set_irq_handler(irq, handle, 0);
+       __set_irq_handler(irq, handle, 0, NULL);
 }
 
-/*
- * Get a descriptive string for the highlevel handler, for
- * /proc/interrupts output:
- */
-const char *
-handle_irq_name(irq_flow_handler_t handle)
+void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+                             irq_flow_handler_t handle, const char *name)
 {
-       if (handle == handle_level_irq)
-               return "level  ";
-       if (handle == handle_fasteoi_irq)
-               return "fasteoi";
-       if (handle == handle_edge_irq)
-               return "edge   ";
-       if (handle == handle_simple_irq)
-               return "simple ";
-#ifdef CONFIG_SMP
-       if (handle == handle_percpu_irq)
-               return "percpu ";
-#endif
-       if (handle == handle_bad_irq)
-               return "bad    ";
-
-       return NULL;
+       set_irq_chip(irq, chip);
+       __set_irq_handler(irq, handle, 0, name);
 }
index 607c7809ad0125e7aad8d28a308319601a7223ea..9a352667007ce52fe610c7c72cc190772cb126dd 100644 (file)
@@ -57,7 +57,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
        if (!irq_desc[irq].chip->set_affinity || no_irq_affinity)
                return -EIO;
 
-       err = cpumask_parse(buffer, count, new_value);
+       err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                return err;
 
index ba7156ac70c145d122d0f15c27782519f38f335d..b739be2a6dc9adff9eb6a471a0044bbeedb54976 100644 (file)
@@ -575,6 +575,8 @@ static noinline int print_circular_bug_tail(void)
        return 0;
 }
 
+#define RECURSION_LIMIT 40
+
 static int noinline print_infinite_recursion_bug(void)
 {
        __raw_spin_unlock(&hash_lock);
@@ -595,7 +597,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
        debug_atomic_inc(&nr_cyclic_check_recursions);
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
-       if (depth >= 20)
+       if (depth >= RECURSION_LIMIT)
                return print_infinite_recursion_bug();
        /*
         * Check this lock's dependency list:
@@ -645,7 +647,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
-       if (depth >= 20)
+       if (depth >= RECURSION_LIMIT)
                return print_infinite_recursion_bug();
 
        debug_atomic_inc(&nr_find_usage_forwards_checks);
@@ -684,7 +686,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 
        if (depth > max_recursion_depth)
                max_recursion_depth = depth;
-       if (depth >= 20)
+       if (depth >= RECURSION_LIMIT)
                return print_infinite_recursion_bug();
 
        debug_atomic_inc(&nr_find_usage_backwards_checks);
@@ -1114,8 +1116,6 @@ static int count_matching_names(struct lock_class *new_class)
        return count + 1;
 }
 
-extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
-
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1153,8 +1153,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
-       if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
-               __error_too_big_MAX_LOCKDEP_SUBCLASSES();
+       BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
 
        key = lock->key->subkeys + subclass;
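
BUILD_BUG_ON() gives the same compile-time guarantee as the old undefined-extern trick, but without involving the linker: it expands to an expression that is only ill-formed when the condition is true, so a violation fails right at compile time. The classic idiom is along these lines:

	/* Sketch of the idiom (the kernel's <linux/kernel.h> version is
	 * equivalent in spirit): sizeof() of a negative-sized array is a
	 * compile-time error, so this only compiles when !condition. */
	#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
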
 
index 7f60e782de1e49fdfbcc96374e39898920cea94a..67009bd56c522b634d35a9b8c2852228b4b29730 100644 (file)
@@ -87,6 +87,12 @@ static inline int strong_try_module_get(struct module *mod)
        return try_module_get(mod);
 }
 
+static inline void add_taint_module(struct module *mod, unsigned flag)
+{
+       add_taint(flag);
+       mod->taints |= flag;
+}
+
 /* A thread that wants to hold a reference to a module only while it
  * is running can call this to safely exit.
  * nfsd and lockd use this.
@@ -847,12 +853,10 @@ static int check_version(Elf_Shdr *sechdrs,
                return 0;
        }
        /* Not in module's version table.  OK, but that taints the kernel. */
-       if (!(tainted & TAINT_FORCED_MODULE)) {
+       if (!(tainted & TAINT_FORCED_MODULE))
                printk("%s: no version for \"%s\" found: kernel tainted.\n",
                       mod->name, symname);
-               add_taint(TAINT_FORCED_MODULE);
-               mod->taints |= TAINT_FORCED_MODULE;
-       }
+       add_taint_module(mod, TAINT_FORCED_MODULE);
        return 1;
 }
 
@@ -910,7 +914,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
        unsigned long ret;
        const unsigned long *crc;
 
-       ret = __find_symbol(name, &owner, &crc, mod->license_gplok);
+       ret = __find_symbol(name, &owner, &crc,
+                       !(mod->taints & TAINT_PROPRIETARY_MODULE));
        if (ret) {
                /* use_module can fail due to OOM, or module unloading */
                if (!check_version(sechdrs, versindex, name, mod, crc) ||
@@ -1335,12 +1340,11 @@ static void set_license(struct module *mod, const char *license)
        if (!license)
                license = "unspecified";
 
-       mod->license_gplok = license_is_gpl_compatible(license);
-       if (!mod->license_gplok && !(tainted & TAINT_PROPRIETARY_MODULE)) {
-               printk(KERN_WARNING "%s: module license '%s' taints kernel.\n",
-                      mod->name, license);
-               add_taint(TAINT_PROPRIETARY_MODULE);
-               mod->taints |= TAINT_PROPRIETARY_MODULE;
+       if (!license_is_gpl_compatible(license)) {
+               if (!(tainted & TAINT_PROPRIETARY_MODULE))
+                       printk(KERN_WARNING "%s: module license '%s' taints "
+                               "kernel.\n", mod->name, license);
+               add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
        }
 }
 
@@ -1619,8 +1623,7 @@ static struct module *load_module(void __user *umod,
        modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
        /* This is allowed: modprobe --force will invalidate it. */
        if (!modmagic) {
-               add_taint(TAINT_FORCED_MODULE);
-               mod->taints |= TAINT_FORCED_MODULE;
+               add_taint_module(mod, TAINT_FORCED_MODULE);
                printk(KERN_WARNING "%s: no version magic, tainting kernel.\n",
                       mod->name);
        } else if (!same_magic(modmagic, vermagic)) {
@@ -1714,14 +1717,10 @@ static struct module *load_module(void __user *umod,
        /* Set up license info based on the info section */
        set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
-       if (strcmp(mod->name, "ndiswrapper") == 0) {
-               add_taint(TAINT_PROPRIETARY_MODULE);
-               mod->taints |= TAINT_PROPRIETARY_MODULE;
-       }
-       if (strcmp(mod->name, "driverloader") == 0) {
-               add_taint(TAINT_PROPRIETARY_MODULE);
-               mod->taints |= TAINT_PROPRIETARY_MODULE;
-       }
+       if (strcmp(mod->name, "ndiswrapper") == 0)
+               add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+       if (strcmp(mod->name, "driverloader") == 0)
+               add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 
        /* Set up MODINFO_ATTR fields */
        setup_modinfo(mod, sechdrs, infoindex);
@@ -1766,8 +1765,7 @@ static struct module *load_module(void __user *umod,
            (mod->num_unused_gpl_syms && !unusedgplcrcindex)) {
                printk(KERN_WARNING "%s: No versions for exported symbols."
                       " Tainting kernel.\n", mod->name);
-               add_taint(TAINT_FORCED_MODULE);
-               mod->taints |= TAINT_FORCED_MODULE;
+               add_taint_module(mod, TAINT_FORCED_MODULE);
        }
 #endif
 
@@ -2132,9 +2130,33 @@ static void m_stop(struct seq_file *m, void *p)
        mutex_unlock(&module_mutex);
 }
 
+static char *taint_flags(unsigned int taints, char *buf)
+{
+       int bx = 0;
+
+       if (taints) {
+               buf[bx++] = '(';
+               if (taints & TAINT_PROPRIETARY_MODULE)
+                       buf[bx++] = 'P';
+               if (taints & TAINT_FORCED_MODULE)
+                       buf[bx++] = 'F';
+               /*
+                * TAINT_FORCED_RMMOD: could be added.
+                * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+                * apply to modules.
+                */
+               buf[bx++] = ')';
+       }
+       buf[bx] = '\0';
+
+       return buf;
+}
+
 static int m_show(struct seq_file *m, void *p)
 {
        struct module *mod = list_entry(p, struct module, list);
+       char buf[8];
+
        seq_printf(m, "%s %lu",
                   mod->name, mod->init_size + mod->core_size);
        print_unload_info(m, mod);
@@ -2147,6 +2169,10 @@ static int m_show(struct seq_file *m, void *p)
        /* Used by oprofile and other similar tools. */
        seq_printf(m, " 0x%p", mod->module_core);
 
+       /* Taints info */
+       if (mod->taints)
+               seq_printf(m, " %s", taint_flags(mod->taints, buf));
+
        seq_printf(m, "\n");
        return 0;
 }
@@ -2235,28 +2261,6 @@ struct module *module_text_address(unsigned long addr)
        return mod;
 }
 
-static char *taint_flags(unsigned int taints, char *buf)
-{
-       *buf = '\0';
-       if (taints) {
-               int bx;
-
-               buf[0] = '(';
-               bx = 1;
-               if (taints & TAINT_PROPRIETARY_MODULE)
-                       buf[bx++] = 'P';
-               if (taints & TAINT_FORCED_MODULE)
-                       buf[bx++] = 'F';
-               /*
-                * TAINT_FORCED_RMMOD: could be added.
-                * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
-                * apply to modules.
-                */
-               buf[bx] = ')';
-       }
-       return buf;
-}
-
 /* Don't grab lock, we're oopsing. */
 void print_modules(void)
 {
index 479b16b44f79f6b639683c724405f51b3ecc156f..7c3e1e6dfb5b5eef84f61f6259e764703846d9fd 100644 (file)
@@ -87,6 +87,19 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
        return a;
 }
 
+/*
+ * Divide and limit the result to res >= 1
+ *
+ * This is necessary to prevent signal delivery starvation, when the result of
+ * the division would be rounded down to 0.
+ */
+static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
+{
+       cputime_t res = cputime_div(time, div);
+
+       return max_t(cputime_t, res, 1);
+}
+
 /*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
@@ -483,8 +496,8 @@ static void process_timer_rebalance(struct task_struct *p,
                BUG();
                break;
        case CPUCLOCK_PROF:
-               left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-                                  nthreads);
+               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+                                      nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(prof_ticks(t), left);
@@ -498,8 +511,8 @@ static void process_timer_rebalance(struct task_struct *p,
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
-               left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-                                  nthreads);
+               left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+                                      nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(virt_ticks(t), left);
@@ -515,6 +528,7 @@ static void process_timer_rebalance(struct task_struct *p,
        case CPUCLOCK_SCHED:
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
+               nsleft = max_t(unsigned long long, nsleft, 1);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ns = t->sched_time + nsleft;
@@ -1159,12 +1173,13 @@ static void check_process_timers(struct task_struct *tsk,
 
                prof_left = cputime_sub(prof_expires, utime);
                prof_left = cputime_sub(prof_left, stime);
-               prof_left = cputime_div(prof_left, nthreads);
+               prof_left = cputime_div_non_zero(prof_left, nthreads);
                virt_left = cputime_sub(virt_expires, utime);
-               virt_left = cputime_div(virt_left, nthreads);
+               virt_left = cputime_div_non_zero(virt_left, nthreads);
                if (sched_expires) {
                        sched_left = sched_expires - sched_time;
                        do_div(sched_left, nthreads);
+                       sched_left = max_t(unsigned long long, sched_left, 1);
                } else {
                        sched_left = 0;
                }
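
The cputime_div_non_zero() clamp used above matters precisely here: when the time remaining is smaller than the thread count, a plain division yields 0, i.e. a per-thread timer that has already expired and re-fires forever. Concretely:

	/* 3 ticks left, 8 threads:
	 *   cputime_div(3, 8)          == 0  -> immediate re-expiry, starvation
	 *   cputime_div_non_zero(3, 8) == 1  -> each thread waits >= 1 tick
	 */
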
index d72234942798e6606d613cf6c99d5fc72ab40c09..d3a158a60312187242c8e1238b68ac5151d11459 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/pm.h>
+#include <linux/console.h>
 #include <linux/cpu.h>
 
 #include "power.h"
@@ -119,8 +120,10 @@ int pm_suspend_disk(void)
        if (error)
                return error;
 
+       suspend_console();
        error = device_suspend(PMSG_FREEZE);
        if (error) {
+               resume_console();
                printk("Some devices failed to suspend\n");
                unprepare_processes();
                return error;
@@ -133,6 +136,7 @@ int pm_suspend_disk(void)
 
        if (in_suspend) {
                device_resume();
+               resume_console();
                pr_debug("PM: writing image.\n");
                error = swsusp_write();
                if (!error)
@@ -148,6 +152,7 @@ int pm_suspend_disk(void)
        swsusp_free();
  Done:
        device_resume();
+       resume_console();
        unprepare_processes();
        return error;
 }
@@ -212,7 +217,9 @@ static int software_resume(void)
 
        pr_debug("PM: Preparing devices for restore.\n");
 
+       suspend_console();
        if ((error = device_suspend(PMSG_PRETHAW))) {
+               resume_console();
                printk("Some devices failed to suspend\n");
                swsusp_free();
                goto Thaw;
@@ -224,6 +231,7 @@ static int software_resume(void)
        swsusp_resume();
        pr_debug("PM: Restore failed, recovering.n");
        device_resume();
+       resume_console();
  Thaw:
        unprepare_processes();
  Done:
index 9b2ee5344dee10b51ae4f0d1fda84b1aa99d3de4..1a3b0dd2c3fcc18b2db25fc7472560cb67edfb7e 100644 (file)
@@ -425,7 +425,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
                        bio_set_pages_dirty(bio);
                bio_put(bio);
        } else {
-               get_page(page);
+               if (rw == READ)
+                       get_page(page); /* These pages are freed later */
                bio->bi_private = *bio_chain;
                *bio_chain = bio;
                submit_bio(rw | (1 << BIO_RW_SYNC), bio);
index 93b5dd283dea05dcdf7c6f1681f67385292ba9c8..d991d3b0e5a4e326ea29b6b7fc50c4a926dd5e3e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/swapops.h>
 #include <linux/pm.h>
 #include <linux/fs.h>
+#include <linux/console.h>
 #include <linux/cpu.h>
 
 #include <asm/uaccess.h>
@@ -173,12 +174,14 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                /* Free memory before shutting down devices. */
                error = swsusp_shrink_memory();
                if (!error) {
+                       suspend_console();
                        error = device_suspend(PMSG_FREEZE);
                        if (!error) {
                                in_suspend = 1;
                                error = swsusp_suspend();
                                device_resume();
                        }
+                       resume_console();
                }
                up(&pm_sem);
                if (!error)
@@ -196,11 +199,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                snapshot_free_unused_memory(&data->handle);
                down(&pm_sem);
                pm_prepare_console();
+               suspend_console();
                error = device_suspend(PMSG_PRETHAW);
                if (!error) {
                        error = swsusp_resume();
                        device_resume();
                }
+               resume_console();
                pm_restore_console();
                up(&pm_sem);
                break;
@@ -289,6 +294,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                }
 
                /* Put devices to sleep */
+               suspend_console();
                error = device_suspend(PMSG_SUSPEND);
                if (error) {
                        printk(KERN_ERR "Failed to suspend some devices.\n");
@@ -299,7 +305,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
                        /* Wake up devices */
                        device_resume();
                }
-
+               resume_console();
                if (pm_ops->finish)
                        pm_ops->finish(PM_SUSPEND_MEM);
 
index 771f5e861bcd38af1d5d7f91666b2413b1d4008b..f7d427ef50385d70db6a0222ea3c53a0d8738d03 100644 (file)
@@ -820,15 +820,8 @@ void release_console_sem(void)
        console_locked = 0;
        up(&console_sem);
        spin_unlock_irqrestore(&logbuf_lock, flags);
-       if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) {
-               /*
-                * If we printk from within the lock dependency code,
-                * from within the scheduler code, then do not lock
-                * up due to self-recursion:
-                */
-               if (!lockdep_internal())
-                       wake_up_interruptible(&log_wait);
-       }
+       if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait))
+               wake_up_interruptible(&log_wait);
 }
 EXPORT_SYMBOL(release_console_sem);
 
index 857300a2afec92d97a724255402780745f042916..f940b462eec9a34eb7c3cad4ee958a2ec0f1f5bc 100644 (file)
@@ -399,7 +399,7 @@ static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffe
        unsigned long full_count = count, err;
        cpumask_t new_value;
 
-       err = cpumask_parse(buffer, count, new_value);
+       err = cpumask_parse_user(buffer, count, new_value);
        if (err)
                return err;
 
index 53608a59d6e3c0fd3d0b18dbf919beb9b0125397..094b5687eef6da6864ae30bb1ce4d936a5552151 100644 (file)
@@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;
 
-       if (unlikely(!mm)) {
+       if (!mm) {
                next->active_mm = oldmm;
                atomic_inc(&oldmm->mm_count);
                enter_lazy_tlb(oldmm, next);
        } else
                switch_mm(oldmm, mm, next);
 
-       if (unlikely(!prev->mm)) {
+       if (!prev->mm) {
                prev->active_mm = NULL;
                WARN_ON(rq->prev_mm);
                rq->prev_mm = oldmm;
@@ -3491,7 +3491,7 @@ asmlinkage void __sched preempt_schedule(void)
         * If there is a non-zero preempt_count or interrupts are disabled,
         * we do not want to preempt the current task.  Just return..
         */
-       if (unlikely(ti->preempt_count || irqs_disabled()))
+       if (likely(ti->preempt_count || irqs_disabled()))
                return;
 
 need_resched:
index 7a3b2e75f0402122ced8b15d5488a6b9de49ee8e..0e53314b14de7124456faa2ea6cc0f7aa87fcdb9 100644 (file)
@@ -49,6 +49,7 @@ cond_syscall(compat_sys_get_robust_list);
 cond_syscall(sys_epoll_create);
 cond_syscall(sys_epoll_ctl);
 cond_syscall(sys_epoll_wait);
+cond_syscall(sys_epoll_pwait);
 cond_syscall(sys_semget);
 cond_syscall(sys_semop);
 cond_syscall(sys_semtimedop);
index 126bb30c4afe42b01b78945d2105033f994979b3..a99b2a6e6a07354781da79b6067b1944d438d5f9 100644 (file)
@@ -57,7 +57,7 @@ static cycle_t jiffies_read(void)
 
 struct clocksource clocksource_jiffies = {
        .name           = "jiffies",
-       .rating         = 0, /* lowest rating*/
+       .rating         = 1, /* lowest valid rating */
        .read           = jiffies_read,
        .mask           = 0xffffffff, /*32bits*/
        .mult           = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
index cfc737bffe6deb8d5b2c97658b19e7917e296cce..3df9bfc7ff78fed6215a2d6f138a18a016d7e04a 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/notifier.h>
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
+#include <linux/mempolicy.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -245,6 +246,12 @@ static int worker_thread(void *__cwq)
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);
 
+       /*
+        * We inherited MPOL_INTERLEAVE from the booting kernel.
+        * Set MPOL_DEFAULT to ensure node-local allocations.
+        */
+       numa_default_policy();
+
        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
index 756a908c441d28710e773e15b7b619191f9efdcd..77491e311791ee807d8ee082b4bae7e966d499d2 100644 (file)
@@ -71,7 +71,7 @@ config LOG_BUF_SHIFT
 
 config DETECT_SOFTLOCKUP
        bool "Detect Soft Lockups"
-       depends on DEBUG_KERNEL
+       depends on DEBUG_KERNEL && !S390
        default y
        help
          Say Y here to enable the kernel to detect "soft lockups",
@@ -371,6 +371,20 @@ config FORCED_INLINING
          become the default in the future, until then this option is there to
          test gcc for this.
 
+config HEADERS_CHECK
+       bool "Run 'make headers_check' when building vmlinux"
+       depends on !UML
+       help
+         This option will extract the user-visible kernel headers whenever
+         building the kernel, and will run basic sanity checks on them to
+         ensure that exported files do not attempt to include files which
+         were not exported, etc.
+
+         If you're making modifications to header files which are
+         relevant for userspace, say 'Y', and check the headers
+         exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
+         your build tree), to make sure they're suitable.
+
 config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
index 8e6662bb9c379a19511319d4fc76fc44828e9465..cf98fabaa549524d2f882d06649abfaa4c91239d 100644 (file)
@@ -12,7 +12,7 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y  += kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o
+obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o random32.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
index d71e38c54ea50444c5d76c91471c3827c4beaf64..037fa9aa2ed77f554296c7f1f4b64f769fdd2a73 100644 (file)
@@ -316,10 +316,11 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
 EXPORT_SYMBOL(bitmap_scnprintf);
 
 /**
- * bitmap_parse - convert an ASCII hex string into a bitmap.
- * @ubuf: pointer to buffer in user space containing string.
- * @ubuflen: buffer size in bytes.  If string is smaller than this
+ * __bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @buf: pointer to buffer containing string.
+ * @buflen: buffer size in bytes.  If string is smaller than this
  *    then it must be terminated with a \0.
+ * @is_user: location of buffer, 0 indicates kernel space
  * @maskp: pointer to bitmap array that will contain result.
  * @nmaskbits: size of bitmap, in bits.
  *
@@ -330,11 +331,13 @@ EXPORT_SYMBOL(bitmap_scnprintf);
  * characters and for grouping errors such as "1,,5", ",44", "," and "".
  * Leading and trailing whitespace accepted, but not embedded whitespace.
  */
-int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
-        unsigned long *maskp, int nmaskbits)
+int __bitmap_parse(const char *buf, unsigned int buflen,
+               int is_user, unsigned long *maskp,
+               int nmaskbits)
 {
        int c, old_c, totaldigits, ndigits, nchunks, nbits;
        u32 chunk;
+       const char __user *ubuf = buf;
 
        bitmap_zero(maskp, nmaskbits);
 
@@ -343,11 +346,15 @@ int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
                chunk = ndigits = 0;
 
                /* Get the next chunk of the bitmap */
-               while (ubuflen) {
+               while (buflen) {
                        old_c = c;
-                       if (get_user(c, ubuf++))
-                               return -EFAULT;
-                       ubuflen--;
+                       if (is_user) {
+                               if (__get_user(c, ubuf++))
+                                       return -EFAULT;
+                       } else
+                               c = *buf++;
+                       buflen--;
                        if (isspace(c))
                                continue;
 
@@ -388,11 +395,36 @@ int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
                nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
                if (nbits > nmaskbits)
                        return -EOVERFLOW;
-       } while (ubuflen && c == ',');
+       } while (buflen && c == ',');
 
        return 0;
 }
-EXPORT_SYMBOL(bitmap_parse);
+EXPORT_SYMBOL(__bitmap_parse);
+
+/**
+ * bitmap_parse_user - convert a user-space ASCII hex string into a bitmap.
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes.  If string is smaller than this
+ *    then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Wrapper for __bitmap_parse(), providing it with user buffer.
+ *
+ * We cannot have this as an inline function in bitmap.h because it needs
+ * linux/uaccess.h to get the access_ok() declaration and this causes
+ * cyclic dependencies.
+ */
+int bitmap_parse_user(const char __user *ubuf,
+                       unsigned int ulen, unsigned long *maskp,
+                       int nmaskbits)
+{
+       if (!access_ok(VERIFY_READ, ubuf, ulen))
+               return -EFAULT;
+       return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
+}
+EXPORT_SYMBOL(bitmap_parse_user);
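
The split leaves __bitmap_parse() usable from kernel space as well; the is_user flag selects between __get_user() and a direct dereference. Two hedged usage sketches:

	DECLARE_BITMAP(mask, NR_CPUS);

	/* From a /proc write handler: the buffer comes from userspace. */
	err = bitmap_parse_user(user_buf, count, mask, NR_CPUS);

	/* From kernel space, e.g. parsing a boot parameter string: */
	err = __bitmap_parse(kbuf, strlen(kbuf), 0, mask, NR_CPUS);
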
 
 /*
  * bscnl_emit(buf, buflen, rbot, rtop, bp)
diff --git a/lib/random32.c b/lib/random32.c
new file mode 100644 (file)
index 0000000..4a15ce5
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+  This is a maximally equidistributed combined Tausworthe generator
+  based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+
+   x_n = (s1_n ^ s2_n ^ s3_n)
+
+   s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
+   s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
+   s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
+
+   The period of this generator is about 2^88.
+
+   From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+   Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
+
+   This is available on the net from L'Ecuyer's home page,
+
+   http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+   ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+
+   There is an erratum in the paper "Tables of Maximally
+   Equidistributed Combined LFSR Generators", Mathematics of
+   Computation, 68, 225 (1999), 261--269:
+   http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+
+        ... the k_j most significant bits of z_j must be non-
+        zero, for each j. (Note: this restriction also applies to the
+        computer code given in [4], but was mistakenly not mentioned in
+        that paper.)
+
+   This affects the seeding procedure by imposing the requirement
+   s1 > 1, s2 > 7, s3 > 15.
+
+*/
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct rnd_state {
+       u32 s1, s2, s3;
+};
+
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+
+static u32 __random32(struct rnd_state *state)
+{
+#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
+
+       state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
+       state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
+       state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
+
+       return (state->s1 ^ state->s2 ^ state->s3);
+}
+
+static void __set_random32(struct rnd_state *state, unsigned long s)
+{
+       if (s == 0)
+               s = 1;      /* default seed is 1 */
+
+#define LCG(n) (69069 * n)
+       state->s1 = LCG(s);
+       state->s2 = LCG(state->s1);
+       state->s3 = LCG(state->s2);
+
+       /* "warm it up" */
+       __random32(state);
+       __random32(state);
+       __random32(state);
+       __random32(state);
+       __random32(state);
+       __random32(state);
+}
+
+/**
+ *     random32 - pseudo random number generator
+ *
+ *     A 32 bit pseudo-random number is generated using a fast
+ *     algorithm suitable for simulation. This algorithm is NOT
+ *     considered safe for cryptographic use.
+ */
+u32 random32(void)
+{
+       unsigned long r;
+       struct rnd_state *state = &get_cpu_var(net_rand_state);
+       r = __random32(state);
+       put_cpu_var(state);
+       return r;
+}
+EXPORT_SYMBOL(random32);
+
+/**
+ *     srandom32 - add entropy to pseudo random number generator
+ *     @seed: seed value
+ *
+ *     Add some additional seeding to the random32() pool.
+ *     Note: this pool is per-CPU so it only affects the current CPU.
+ */
+void srandom32(u32 entropy)
+{
+       struct rnd_state *state = &get_cpu_var(net_rand_state);
+       __set_random32(state, state->s1 ^ entropy);
+       put_cpu_var(state);
+}
+EXPORT_SYMBOL(srandom32);
+
+/*
+ *     Generate some initially weak seeding values to allow
+ *     the random32() engine to start.
+ */
+static int __init random32_init(void)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               struct rnd_state *state = &per_cpu(net_rand_state,i);
+               __set_random32(state, i + jiffies);
+       }
+       return 0;
+}
+core_initcall(random32_init);
+
+/*
+ *     Generate better values after the random number generator
+ *     is fully initialized.
+ */
+static int __init random32_reseed(void)
+{
+       int i;
+       unsigned long seed;
+
+       for_each_possible_cpu(i) {
+               struct rnd_state *state = &per_cpu(net_rand_state,i);
+
+               get_random_bytes(&seed, sizeof(seed));
+               __set_random32(state, seed);
+       }
+       return 0;
+}
+late_initcall(random32_reseed);
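
Usage is deliberately boring: no locking, no blocking, and explicitly not safe for cryptographic purposes. A hedged sketch (n and extra_seed are placeholders):

	u32 r = random32();		/* 32-bit pseudo-random value */
	u32 idx = random32() % n;	/* slightly biased unless n is a power of 2 */

	srandom32(extra_seed);		/* stir extra entropy into this CPU's pool */
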
index 1d709ff528e1e91cd9bbbf086f119c5d4ba3154a..2dbec90dc3bad98cab038db4513f566360e8de8d 100644 (file)
@@ -356,8 +356,8 @@ nomem:
        return -ENOMEM;
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
-                         unsigned long end)
+void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+                           unsigned long end)
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
@@ -398,6 +398,24 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        }
 }
 
+void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+                         unsigned long end)
+{
+       /*
+        * It is undesirable to test vma->vm_file as it should be non-null
+        * for valid hugetlb area. However, vm_file will be NULL in the error
+        * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
+        * do_mmap_pgoff() nullifies vma->vm_file before calling this function
+        * to clean up. Since no pte has actually been setup, it is safe to
+        * do nothing in this case.
+        */
+       if (vma->vm_file) {
+               spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+               __unmap_hugepage_range(vma, start, end);
+               spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       }
+}
+
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
 {
index 25788b1b7fcff4b6d116dffab3abe6e1ecc17d08..617fb31086eef17d45f5b04df65ff3a851a7a19c 100644 (file)
@@ -727,7 +727,7 @@ int do_migrate_pages(struct mm_struct *mm,
        return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private)
+static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
 {
        return NULL;
 }
index eea8eefd51a86588bd1c091d94fe74df800a9bc2..497e502dfd6b6e54fc2bf902ea55fac36e70ac72 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -900,17 +900,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        int accountable = 1;
        unsigned long charged = 0, reqprot = prot;
 
-       if (file) {
-               if (is_file_hugepages(file))
-                       accountable = 0;
-
-               if (!file->f_op || !file->f_op->mmap)
-                       return -ENODEV;
-
-               if ((prot & PROT_EXEC) &&
-                   (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
-                       return -EPERM;
-       }
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
@@ -1000,6 +989,16 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
+                       if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
+                               if (vm_flags & VM_EXEC)
+                                       return -EPERM;
+                               vm_flags &= ~VM_MAYEXEC;
+                       }
+                       if (is_file_hugepages(file))
+                               accountable = 0;
+
+                       if (!file->f_op || !file->f_op->mmap)
+                               return -ENODEV;
                        break;
 
                default:
index a8c003e7b3d51ae97c1a443f0c8d2fb1989f57a4..40db96a655d0c23ad7df245f6dd990b6555c8f4f 100644 (file)
@@ -495,17 +495,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        int i;
        int reserved = 0;
 
-       arch_free_page(page, order);
-       if (!PageHighMem(page))
-               debug_check_no_locks_freed(page_address(page),
-                                          PAGE_SIZE<<order);
-
        for (i = 0 ; i < (1 << order) ; ++i)
                reserved += free_pages_check(page + i);
        if (reserved)
                return;
 
+       if (!PageHighMem(page))
+               debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+       arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
+
        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order);
@@ -781,13 +780,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
        struct per_cpu_pages *pcp;
        unsigned long flags;
 
-       arch_free_page(page, 0);
-
        if (PageAnon(page))
                page->mapping = NULL;
        if (free_pages_check(page))
                return;
 
+       if (!PageHighMem(page))
+               debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+       arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
@@ -2294,19 +2294,6 @@ unsigned long __init zone_absent_pages_in_node(int nid,
        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
-/* Return the zone index a PFN is in */
-int memmap_zone_idx(struct page *lmem_map)
-{
-       int i;
-       unsigned long phys_addr = virt_to_phys(lmem_map);
-       unsigned long pfn = phys_addr >> PAGE_SHIFT;
-
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               if (pfn < arch_zone_highest_possible_pfn[i])
-                       break;
-
-       return i;
-}
 #else
 static inline unsigned long zone_spanned_pages_in_node(int nid,
                                        unsigned long zone_type,
@@ -2325,10 +2312,6 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
        return zholes_size[zone_type];
 }
 
-static inline int memmap_zone_idx(struct page *lmem_map)
-{
-       return MAX_NR_ZONES;
-}
 #endif
 
 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
index e2155d791d9967a6e1bbf496ee9b8728dd9bb4c5..a9136d8b7577fc47cc5aeb82e2997cd8ded06885 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -576,15 +576,14 @@ void page_add_file_rmap(struct page *page)
 void page_remove_rmap(struct page *page)
 {
        if (atomic_add_negative(-1, &page->_mapcount)) {
-#ifdef CONFIG_DEBUG_VM
                if (unlikely(page_mapcount(page) < 0)) {
                        printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
                        printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
                        printk (KERN_EMERG "  page->count = %x\n", page_count(page));
                        printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
+                       BUG();
                }
-#endif
-               BUG_ON(page_mapcount(page) < 0);
+
                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
index bb8ca7ef70940de154adfcbb7f30debd3f3c3737..b378f66cf2f927f01ccfbc4c037060831951a61a 100644 (file)
@@ -1362,6 +1362,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
                inode->i_mapping->a_ops = &shmem_aops;
                inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+               inode->i_generation = get_seconds();
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
@@ -1956,6 +1957,85 @@ static struct xattr_handler *shmem_xattr_handlers[] = {
 };
 #endif
 
+static struct dentry *shmem_get_parent(struct dentry *child)
+{
+       return ERR_PTR(-ESTALE);
+}
+
+static int shmem_match(struct inode *ino, void *vfh)
+{
+       __u32 *fh = vfh;
+       __u64 inum = fh[2];
+       inum = (inum << 32) | fh[1];
+       return ino->i_ino == inum && fh[0] == ino->i_generation;
+}
+
+static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
+{
+       struct dentry *de = NULL;
+       struct inode *inode;
+       __u32 *fh = vfh;
+       __u64 inum = fh[2];
+       inum = (inum << 32) | fh[1];
+
+       inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
+       if (inode) {
+               de = d_find_alias(inode);
+               iput(inode);
+       }
+
+       return de ? de : ERR_PTR(-ESTALE);
+}
+
+static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
+               int len, int type,
+               int (*acceptable)(void *context, struct dentry *de),
+               void *context)
+{
+       if (len < 3)
+               return ERR_PTR(-ESTALE);
+
+       return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
+                                                       context);
+}
+
+static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
+                               int connectable)
+{
+       struct inode *inode = dentry->d_inode;
+
+       if (*len < 3)
+               return 255;
+
+       if (hlist_unhashed(&inode->i_hash)) {
+               /* Unfortunately insert_inode_hash is not idempotent,
+                * so as we hash inodes here rather than at creation
+                * time, we need a lock to ensure we only try
+                * to do it once
+                */
+               static DEFINE_SPINLOCK(lock);
+               spin_lock(&lock);
+               if (hlist_unhashed(&inode->i_hash))
+                       __insert_inode_hash(inode,
+                                           inode->i_ino + inode->i_generation);
+               spin_unlock(&lock);
+       }
+
+       fh[0] = inode->i_generation;
+       fh[1] = inode->i_ino;
+       fh[2] = ((__u64)inode->i_ino) >> 32;
+
+       *len = 3;
+       return 1;
+}
+
+static struct export_operations shmem_export_ops = {
+       .get_parent     = shmem_get_parent,
+       .get_dentry     = shmem_get_dentry,
+       .encode_fh      = shmem_encode_fh,
+       .decode_fh      = shmem_decode_fh,
+};
+
 static int shmem_parse_options(char *options, int *mode, uid_t *uid,
        gid_t *gid, unsigned long *blocks, unsigned long *inodes,
        int *policy, nodemask_t *policy_nodes)
@@ -2128,6 +2208,7 @@ static int shmem_fill_super(struct super_block *sb,
                                        &inodes, &policy, &policy_nodes))
                        return -EINVAL;
        }
+       sb->s_export_op = &shmem_export_ops;
 #else
        sb->s_flags |= MS_NOUSER;
 #endif
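
The file handle produced by shmem_encode_fh() is three 32-bit words: the inode generation, then the inode number split into low and high halves; the inode is hashed at i_ino + i_generation so ilookup5() can find it again. Decoding just reverses the split, exactly as shmem_match() above does:

	/* Layout: fh[0] = i_generation, fh[1] = low 32 bits of i_ino,
	 * fh[2] = high 32 bits of i_ino. */
	__u64 inum = ((__u64)fh[2] << 32) | fh[1];
	unsigned long hashval = (unsigned long)(inum + fh[0]);	/* hash key */
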
index c946bf4687181a301b33f506ee7b454b1fed6e05..f5664c5b9eb1433425501486562c3c6bb98b2237 100644 (file)
@@ -35,7 +35,7 @@ shmem_get_acl(struct inode *inode, int type)
 }
 
 /**
- * shmem_get_acl  -   generic_acl_operations->setacl() operation
+ * shmem_set_acl  -   generic_acl_operations->setacl() operation
  */
 static void
 shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
index f4edbc179d14423e1f3acf9555b96bc9795b37bc..e07b1e682c38f5fca96436904eb1a9a262ce1426 100644 (file)
@@ -96,7 +96,6 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
                return 0;
 
        ret = remove_mapping(mapping, page);
-       ClearPageUptodate(page);
 
        return ret;
 }
@@ -302,7 +301,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        if (page->mapping != mapping)
                return 0;
 
-       if (PagePrivate(page) && !try_to_release_page(page, 0))
+       if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;
 
        write_lock_irq(&mapping->tree_lock);
@@ -396,6 +395,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                pagevec_release(&pvec);
                cond_resched();
        }
+       WARN_ON_ONCE(ret);
        return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
index 750ab6ed13fca56ca42581dd32f74912fd1b347c..1133dd3aafcf4c02e5b2d109b02f45013b7d9d11 100644 (file)
@@ -428,8 +428,11 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
                area->flags |= VM_VPAGES;
-       } else
-               pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
+       } else {
+               pages = kmalloc_node(array_size,
+                               (gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO)),
+                               node);
+       }
        area->pages = pages;
        if (!area->pages) {
                remove_vm_area(area->addr);
index eca70310adb26239e94c5435eaf2abf0271c89fa..af73c14f9d88e862942fe78ba9ac64b2013a3112 100644 (file)
@@ -378,6 +378,12 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
        return PAGE_CLEAN;
 }
 
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
        BUG_ON(!PageLocked(page));
index 305a099b7477eb8fd0e4a7a7b487b7bd435e6677..67df99e2e5c82b2a4fc72e28dc748a33209fcd55 100644 (file)
 #define BT_DBG(D...)
 #endif
 
-#define VERSION "2.10"
+#define VERSION "2.11"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO   8
 static struct net_proto_family *bt_proto[BT_MAX_PROTO];
+static DEFINE_RWLOCK(bt_proto_lock);
 
 int bt_sock_register(int proto, struct net_proto_family *ops)
 {
+       int err = 0;
+
        if (proto < 0 || proto >= BT_MAX_PROTO)
                return -EINVAL;
 
+       write_lock(&bt_proto_lock);
+
        if (bt_proto[proto])
-               return -EEXIST;
+               err = -EEXIST;
+       else
+               bt_proto[proto] = ops;
 
-       bt_proto[proto] = ops;
-       return 0;
+       write_unlock(&bt_proto_lock);
+
+       return err;
 }
 EXPORT_SYMBOL(bt_sock_register);
 
 int bt_sock_unregister(int proto)
 {
+       int err = 0;
+
        if (proto < 0 || proto >= BT_MAX_PROTO)
                return -EINVAL;
 
+       write_lock(&bt_proto_lock);
+
        if (!bt_proto[proto])
-               return -ENOENT;
+               err = -ENOENT;
+       else
+               bt_proto[proto] = NULL;
 
-       bt_proto[proto] = NULL;
-       return 0;
+       write_unlock(&bt_proto_lock);
+
+       return err;
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
 static int bt_sock_create(struct socket *sock, int proto)
 {
-       int err = 0;
+       int err;
 
        if (proto < 0 || proto >= BT_MAX_PROTO)
                return -EINVAL;
@@ -92,11 +107,18 @@ static int bt_sock_create(struct socket *sock, int proto)
                request_module("bt-proto-%d", proto);
        }
 #endif
+
        err = -EPROTONOSUPPORT;
+
+       read_lock(&bt_proto_lock);
+
        if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
                err = bt_proto[proto]->create(sock, proto);
                module_put(bt_proto[proto]->owner);
        }
+
+       read_unlock(&bt_proto_lock);
+
        return err; 
 }
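/*
 * Reading of the locking above (commentary, not patch text): the
 * per-protocol create() callback now runs with bt_proto_lock read-held,
 * and code inside a rwlock read section must not sleep.  That is
 * presumably why the socket allocations in the protocol create paths
 * later in this patch (bnep, cmtp, hci, hidp, l2cap, rfcomm, sco)
 * switch from GFP_KERNEL to GFP_ATOMIC.
 */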
 
index 2312d050eeedfb66ed382108f0703f6d2dfc5b9d..4d3424c2421c07b4d925559a1e02e4a6f59b7b19 100644 (file)
@@ -528,12 +528,10 @@ static struct device *bnep_get_device(struct bnep_session *session)
                return NULL;
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-       if (!conn)
-               return NULL;
 
        hci_dev_put(hdev);
 
-       return &conn->dev;
+       return conn ? &conn->dev : NULL;
 }
 
 int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
index 28c55835422afb90f1bbf660f5dcb387bde3779b..5563db1bf526e94d4a48c5af511315c4d3646786 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/ioctl.h>
 #include <linux/file.h>
 #include <linux/init.h>
+#include <linux/compat.h>
 #include <net/sock.h>
 
 #include <asm/system.h>
@@ -146,24 +147,56 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
        return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+       if (cmd == BNEPGETCONNLIST) {
+               struct bnep_connlist_req cl;
+               uint32_t uci;
+               int err;
+
+               if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+                               get_user(uci, (u32 __user *) (arg + 4)))
+                       return -EFAULT;
+
+               cl.ci = compat_ptr(uci);
+
+               if (cl.cnum <= 0)
+                       return -EINVAL;
+
+               err = bnep_get_connlist(&cl);
+
+               if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+                       err = -EFAULT;
+
+               return err;
+       }
+
+       return bnep_sock_ioctl(sock, cmd, arg);
+}
+#endif
+
 static const struct proto_ops bnep_sock_ops = {
-       .family     = PF_BLUETOOTH,
-       .owner      = THIS_MODULE,
-       .release    = bnep_sock_release,
-       .ioctl      = bnep_sock_ioctl,
-       .bind       = sock_no_bind,
-       .getname    = sock_no_getname,
-       .sendmsg    = sock_no_sendmsg,
-       .recvmsg    = sock_no_recvmsg,
-       .poll       = sock_no_poll,
-       .listen     = sock_no_listen,
-       .shutdown   = sock_no_shutdown,
-       .setsockopt = sock_no_setsockopt,
-       .getsockopt = sock_no_getsockopt,
-       .connect    = sock_no_connect,
-       .socketpair = sock_no_socketpair,
-       .accept     = sock_no_accept,
-       .mmap       = sock_no_mmap
+       .family         = PF_BLUETOOTH,
+       .owner          = THIS_MODULE,
+       .release        = bnep_sock_release,
+       .ioctl          = bnep_sock_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = bnep_sock_compat_ioctl,
+#endif
+       .bind           = sock_no_bind,
+       .getname        = sock_no_getname,
+       .sendmsg        = sock_no_sendmsg,
+       .recvmsg        = sock_no_recvmsg,
+       .poll           = sock_no_poll,
+       .listen         = sock_no_listen,
+       .shutdown       = sock_no_shutdown,
+       .setsockopt     = sock_no_setsockopt,
+       .getsockopt     = sock_no_getsockopt,
+       .connect        = sock_no_connect,
+       .socketpair     = sock_no_socketpair,
+       .accept         = sock_no_accept,
+       .mmap           = sock_no_mmap
 };
 
 static struct proto bnep_proto = {
@@ -181,7 +214,7 @@ static int bnep_sock_create(struct socket *sock, int protocol)
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &bnep_proto, 1);
+       sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, 1);
        if (!sk)
                return -ENOMEM;
 
index 10ad7fd91d833f26a188b073d46fea1ed1d2cd16..53295d33dc5c31eb610c1766e5795f3c7a123cfd 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/socket.h>
 #include <linux/ioctl.h>
 #include <linux/file.h>
+#include <linux/compat.h>
 #include <net/sock.h>
 
 #include <linux/isdn/capilli.h>
@@ -137,11 +138,43 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
        return -EINVAL;
 }
 
+#ifdef CONFIG_COMPAT
+static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+       if (cmd == CMTPGETCONNLIST) {
+               struct cmtp_connlist_req cl;
+               uint32_t uci;
+               int err;
+
+               if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+                               get_user(uci, (u32 __user *) (arg + 4)))
+                       return -EFAULT;
+
+               cl.ci = compat_ptr(uci);
+
+               if (cl.cnum <= 0)
+                       return -EINVAL;
+
+               err = cmtp_get_connlist(&cl);
+
+               if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+                       err = -EFAULT;
+
+               return err;
+       }
+
+       return cmtp_sock_ioctl(sock, cmd, arg);
+}
+#endif
+
 static const struct proto_ops cmtp_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = cmtp_sock_release,
        .ioctl          = cmtp_sock_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = cmtp_sock_compat_ioctl,
+#endif
        .bind           = sock_no_bind,
        .getname        = sock_no_getname,
        .sendmsg        = sock_no_sendmsg,
@@ -172,7 +205,7 @@ static int cmtp_sock_create(struct socket *sock, int protocol)
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &cmtp_proto, 1);
+       sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, 1);
        if (!sk)
                return -ENOMEM;
 
index 90e3a285a17eaf9a4bde198879748d8a4da680a7..6cd5711fa28a59265403a81f608e6698e7f03dfa 100644 (file)
@@ -51,7 +51,7 @@
 #define BT_DBG(D...)
 #endif
 
-static void hci_acl_connect(struct hci_conn *conn)
+void hci_acl_connect(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
@@ -63,6 +63,8 @@ static void hci_acl_connect(struct hci_conn *conn)
        conn->out   = 1;
        conn->link_mode = HCI_LM_MASTER;
 
+       conn->attempt++;
+
        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;
@@ -80,7 +82,7 @@ static void hci_acl_connect(struct hci_conn *conn)
                cp.role_switch  = 0x01;
        else
                cp.role_switch  = 0x00;
-               
+
        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp);
 }
 
index d43d0c8909752564796b540a61cb58389cc8cb0d..65f094845719126013cfb9f3c1c19e11f4b2b566 100644 (file)
@@ -414,9 +414,12 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
 
        if (status) {
                if (conn && conn->state == BT_CONNECT) {
-                       conn->state = BT_CLOSED;
-                       hci_proto_connect_cfm(conn, status);
-                       hci_conn_del(conn);
+                       if (status != 0x0c || conn->attempt > 2) {
+                               conn->state = BT_CLOSED;
+                               hci_proto_connect_cfm(conn, status);
+                               hci_conn_del(conn);
+                       } else
+                               conn->state = BT_CONNECT2;
                }
        } else {
                if (!conn) {
@@ -728,7 +731,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_conn_complete *ev = (struct hci_ev_conn_complete *) skb->data;
-       struct hci_conn *conn;
+       struct hci_conn *conn, *pend;
 
        BT_DBG("%s", hdev->name);
 
@@ -801,6 +804,10 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
        if (ev->status)
                hci_conn_del(conn);
 
+       pend = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
+       if (pend)
+               hci_acl_connect(pend);
+
        hci_dev_unlock(hdev);
 }
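/*
 * Commentary on the retry logic above (not patch code): HCI status
 * 0x0c is "Command Disallowed", which controllers commonly return
 * while another ACL connection setup is still in flight.  Instead of
 * tearing the connection down, the patch parks it:
 *
 *   hci_cs_create_conn(status 0x0c, attempt <= 2)
 *       -> conn->state = BT_CONNECT2 (pending)
 *   hci_conn_complete_evt()
 *       -> hci_acl_connect() on one pending BT_CONNECT2 connection
 *
 * hci_acl_connect() increments conn->attempt, so a parked connection
 * is retried at most twice before being closed for real.
 */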
 
index 1a35d343e08a593f91cc71fce52306a50d991c4b..f26a9eb49945c8805db60c07964b9c1a734f4556 100644 (file)
@@ -618,7 +618,7 @@ static int hci_sock_create(struct socket *sock, int protocol)
 
        sock->ops = &hci_sock_ops;
 
-       sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &hci_sk_proto, 1);
+       sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, 1);
        if (!sk)
                return -ENOMEM;
 
index 989b22d9042e92f929435d9f0290d2d429e7fd75..954eb74eb370a34cb97050c9bec17e8cbfdff08c 100644 (file)
@@ -242,10 +242,14 @@ static void add_conn(void *data)
        struct hci_conn *conn = data;
        int i;
 
-       device_register(&conn->dev);
+       if (device_register(&conn->dev) < 0) {
+               BT_ERR("Failed to register connection device");
+               return;
+       }
 
        for (i = 0; conn_attrs[i]; i++)
-               device_create_file(&conn->dev, conn_attrs[i]);
+               if (device_create_file(&conn->dev, conn_attrs[i]) < 0)
+                       BT_ERR("Failed to create connection attribute");
 }
 
 void hci_conn_add_sysfs(struct hci_conn *conn)
@@ -295,11 +299,7 @@ int hci_register_sysfs(struct hci_dev *hdev)
        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
 
        dev->class = bt_class;
-
-       if (hdev->parent)
-               dev->parent = hdev->parent;
-       else
-               dev->parent = &bt_platform->dev;
+       dev->parent = hdev->parent;
 
        strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE);
 
@@ -312,7 +312,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
                return err;
 
        for (i = 0; bt_attrs[i]; i++)
-               device_create_file(dev, bt_attrs[i]);
+               if (device_create_file(dev, bt_attrs[i]) < 0)
+                       BT_ERR("Failed to create device attribute");
 
        return 0;
 }
index 03b5dadb49511f90b0c57ee6d9d677d580c5ef18..9a562cf7406bc89ba0ea125eec4979a2783744ce 100644 (file)
@@ -510,11 +510,11 @@ static int hidp_session(void *arg)
        if (intr_sk->sk_state != BT_CONNECTED)
                wait_event_timeout(*(ctrl_sk->sk_sleep), (ctrl_sk->sk_state == BT_CLOSED), HZ);
 
-       fput(session->ctrl_sock->file);
+       fput(session->intr_sock->file);
 
        wait_event_timeout(*(intr_sk->sk_sleep), (intr_sk->sk_state == BT_CLOSED), HZ);
 
-       fput(session->intr_sock->file);
+       fput(session->ctrl_sock->file);
 
        __hidp_unlink_session(session);
 
@@ -541,12 +541,10 @@ static struct device *hidp_get_device(struct hidp_session *session)
                return NULL;
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-       if (!conn)
-               return NULL;
 
        hci_dev_put(hdev);
 
-       return &conn->dev;
+       return conn ? &conn->dev : NULL;
 }
 
 static inline void hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req)
index 099646e4e2ef7407b45687f2188d0bfe2cd7402b..407fba43c1b973cda01122be9cc3dce198c2b930 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/ioctl.h>
 #include <linux/file.h>
 #include <linux/init.h>
+#include <linux/compat.h>
 #include <net/sock.h>
 
 #include "hidp.h"
@@ -143,11 +144,88 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
        return -EINVAL;
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_hidp_connadd_req {
+       int   ctrl_sock;        // Connected control socket
+       int   intr_sock;        // Connected interrupt socket
+       __u16 parser;
+       __u16 rd_size;
+       compat_uptr_t rd_data;
+       __u8  country;
+       __u8  subclass;
+       __u16 vendor;
+       __u16 product;
+       __u16 version;
+       __u32 flags;
+       __u32 idle_to;
+       char  name[128];
+};
+
+static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+       if (cmd == HIDPGETCONNLIST) {
+               struct hidp_connlist_req cl;
+               uint32_t uci;
+               int err;
+
+               if (get_user(cl.cnum, (uint32_t __user *) arg) ||
+                               get_user(uci, (u32 __user *) (arg + 4)))
+                       return -EFAULT;
+
+               cl.ci = compat_ptr(uci);
+
+               if (cl.cnum <= 0)
+                       return -EINVAL;
+
+               err = hidp_get_connlist(&cl);
+
+               if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
+                       err = -EFAULT;
+
+               return err;
+       } else if (cmd == HIDPCONNADD) {
+               struct compat_hidp_connadd_req ca;
+               struct hidp_connadd_req __user *uca;
+
+               uca = compat_alloc_user_space(sizeof(*uca));
+
+               if (copy_from_user(&ca, (void *) arg, sizeof(ca)))
+                       return -EFAULT;
+
+               if (put_user(ca.ctrl_sock, &uca->ctrl_sock) ||
+                               put_user(ca.intr_sock, &uca->intr_sock) ||
+                               put_user(ca.parser, &uca->parser) ||
+                               put_user(ca.rd_size, &uca->rd_size) ||
+                               put_user(compat_ptr(ca.rd_data), &uca->rd_data) ||
+                               put_user(ca.country, &uca->country) ||
+                               put_user(ca.subclass, &uca->subclass) ||
+                               put_user(ca.vendor, &uca->vendor) ||
+                               put_user(ca.product, &uca->product) ||
+                               put_user(ca.version, &uca->version) ||
+                               put_user(ca.flags, &uca->flags) ||
+                               put_user(ca.idle_to, &uca->idle_to) ||
+                               copy_to_user(&uca->name[0], &ca.name[0], 128))
+                       return -EFAULT;
+
+               arg = (unsigned long) uca;
+
+               /* Fall through. We don't actually write back any _changes_
+                  to the structure anyway, so there's no need to copy back
+                  into the original compat version */
+       }
+
+       return hidp_sock_ioctl(sock, cmd, arg);
+}
+#endif
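/*
 * Editorial sketch of the two compat idioms used above (hypothetical
 * structs and handler, not part of this patch).  A 32-bit user pointer
 * is widened with compat_ptr(); when the native handler wants a
 * native-layout struct, compat_alloc_user_space() carves scratch space
 * out of the user stack so the 64-bit path can be reused unchanged.
 */
struct example_req32 {
	s32 fd;
	compat_uptr_t buf;		/* 32-bit user pointer */
};

struct example_req {
	int fd;
	void __user *buf;		/* native user pointer */
};

extern long example_native_ioctl(unsigned long arg);	/* hypothetical */

static long example_widen_and_call(unsigned long arg)
{
	struct example_req32 r32;
	struct example_req __user *nreq;

	if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
		return -EFAULT;

	nreq = compat_alloc_user_space(sizeof(*nreq));
	if (put_user(r32.fd, &nreq->fd) ||
	    put_user(compat_ptr(r32.buf), &nreq->buf))
		return -EFAULT;

	return example_native_ioctl((unsigned long) nreq);
}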
+
 static const struct proto_ops hidp_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hidp_sock_release,
        .ioctl          = hidp_sock_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = hidp_sock_compat_ioctl,
+#endif
        .bind           = sock_no_bind,
        .getname        = sock_no_getname,
        .sendmsg        = sock_no_sendmsg,
@@ -178,7 +256,7 @@ static int hidp_sock_create(struct socket *sock, int protocol)
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &hidp_proto, 1);
+       sk = sk_alloc(PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, 1);
        if (!sk)
                return -ENOMEM;
 
index d56f60b392ac512b36a736f928844208ec88847a..2b3dcb8f90fadebdcde2ace1feed9cd90cf76b0e 100644 (file)
@@ -559,7 +559,7 @@ static int l2cap_sock_create(struct socket *sock, int protocol)
 
        sock->ops = &l2cap_sock_ops;
 
-       sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
+       sk = l2cap_sock_alloc(sock, protocol, GFP_ATOMIC);
        if (!sk)
                return -ENOMEM;
 
@@ -2216,7 +2216,8 @@ static int __init l2cap_init(void)
                goto error;
        }
 
-       class_create_file(bt_class, &class_attr_l2cap);
+       if (class_create_file(bt_class, &class_attr_l2cap) < 0)
+               BT_ERR("Failed to create L2CAP info file");
 
        BT_INFO("L2CAP ver %s", VERSION);
        BT_INFO("L2CAP socket layer initialized");
index 468df3b953f6d3c615169a0939e50ec924eb1af8..ddc4e9d5963e850f86d768abef9481f849392439 100644 (file)
@@ -2058,7 +2058,8 @@ static int __init rfcomm_init(void)
 
        kernel_thread(rfcomm_run, NULL, CLONE_KERNEL);
 
-       class_create_file(bt_class, &class_attr_rfcomm_dlc);
+       if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
+               BT_ERR("Failed to create RFCOMM info file");
 
        rfcomm_init_sockets();
 
index 220fee04e7f274a3d31505f0133af59e742a4f74..544d65b7baa7de67fe796ec2e4cf68a0c6051e1f 100644 (file)
@@ -336,7 +336,8 @@ static int rfcomm_sock_create(struct socket *sock, int protocol)
 
        sock->ops = &rfcomm_sock_ops;
 
-       if (!(sk = rfcomm_sock_alloc(sock, protocol, GFP_KERNEL)))
+       sk = rfcomm_sock_alloc(sock, protocol, GFP_ATOMIC);
+       if (!sk)
                return -ENOMEM;
 
        rfcomm_sock_init(sk, NULL);
@@ -944,7 +945,8 @@ int __init rfcomm_init_sockets(void)
        if (err < 0)
                goto error;
 
-       class_create_file(bt_class, &class_attr_rfcomm);
+       if (class_create_file(bt_class, &class_attr_rfcomm) < 0)
+               BT_ERR("Failed to create RFCOMM info file");
 
        BT_INFO("RFCOMM socket layer initialized");
 
index 1958ad1b8541e57be7f32bb70b2b19a242884dce..b8e3a5f1c8a80683d5806662cc7f55c46a656bee 100644 (file)
@@ -172,12 +172,10 @@ static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
                return NULL;
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
-       if (!conn)
-               return NULL;
 
        hci_dev_put(hdev);
 
-       return &conn->dev;
+       return conn ? &conn->dev : NULL;
 }
 
 static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
@@ -767,6 +765,9 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct termios *old)
 
        BT_DBG("tty %p termios %p", tty, old);
 
+       if (!dev)
+               return;
+
        /* Handle turning off CRTSCTS */
        if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS)) 
                BT_DBG("Turning off CRTSCTS unsupported");
index 7714a2ec3854d032ed0038ebcc8aea5e39494de7..5d13d4f317538e1280118ac7f7f35994437163bf 100644 (file)
@@ -452,7 +452,8 @@ static int sco_sock_create(struct socket *sock, int protocol)
 
        sock->ops = &sco_sock_ops;
 
-       if (!(sk = sco_sock_alloc(sock, protocol, GFP_KERNEL)))
+       sk = sco_sock_alloc(sock, protocol, GFP_ATOMIC);
+       if (!sk)
                return -ENOMEM;
 
        sco_sock_init(sk, NULL);
@@ -967,7 +968,8 @@ static int __init sco_init(void)
                goto error;
        }
 
-       class_create_file(bt_class, &class_attr_sco);
+       if (class_create_file(bt_class, &class_attr_sco) < 0)
+               BT_ERR("Failed to create SCO info file");
 
        BT_INFO("SCO (Voice Link) ver %s", VERSION);
        BT_INFO("SCO socket layer initialized");
index 3a73b8c94271c94beaf3159a078a4fa46c2bb6b0..d9f04864d15d859d0ada8bbb57564999e55eb676 100644 (file)
@@ -128,7 +128,10 @@ void br_fdb_cleanup(unsigned long _data)
        mod_timer(&br->gc_timer, jiffies + HZ/10);
 }
 
-void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
+
+void br_fdb_delete_by_port(struct net_bridge *br,
+                          const struct net_bridge_port *p,
+                          int do_all)
 {
        int i;
 
@@ -142,6 +145,8 @@ void br_fdb_delete_by_port(struct net_bridge *br, struct net_bridge_port *p)
                        if (f->dst != p) 
                                continue;
 
+                       if (f->is_static && !do_all)
+                               continue;
                        /*
                         * if multiple ports all have the same device address
                         * then when one port is deleted, assign
index b1211d5342f6cac5f43926a2c6a39f1651cb6761..f753c40c11d25743d6d13982443b7ebaa1020e38 100644 (file)
@@ -163,7 +163,7 @@ static void del_nbp(struct net_bridge_port *p)
        br_stp_disable_port(p);
        spin_unlock_bh(&br->lock);
 
-       br_fdb_delete_by_port(br, p);
+       br_fdb_delete_by_port(br, p, 1);
 
        list_del_rcu(&p->list);
 
@@ -448,7 +448,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
        return 0;
 err2:
-       br_fdb_delete_by_port(br, p);
+       br_fdb_delete_by_port(br, p, 1);
 err1:
        kobject_del(&p->kobj);
 err0:
index c491fb2f280ebf7b1cf2395371c43d33bf36f43c..74258d86f256daf06b1717331026798640820b87 100644 (file)
@@ -143,7 +143,7 @@ extern void br_fdb_changeaddr(struct net_bridge_port *p,
                              const unsigned char *newaddr);
 extern void br_fdb_cleanup(unsigned long arg);
 extern void br_fdb_delete_by_port(struct net_bridge *br,
-                          struct net_bridge_port *p);
+                                 const struct net_bridge_port *p, int do_all);
 extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
                                                 const unsigned char *addr);
 extern struct net_bridge_fdb_entry *br_fdb_get(struct net_bridge *br,
index 14cd025079af0393abba67e7c3704dc2bd688470..d294224592db0455e0bd31d7f04ab6a290abc21e 100644 (file)
@@ -113,6 +113,8 @@ void br_stp_disable_port(struct net_bridge_port *p)
        del_timer(&p->forward_delay_timer);
        del_timer(&p->hold_timer);
 
+       br_fdb_delete_by_port(br, p, 0);
+
        br_configuration_update(br);
 
        br_port_state_selection(br);
index d5d69fa15d07a65a26975797d654813e5f055568..52d32f1bc7281c6da3276ae78a0cad1e63aa4426 100644 (file)
@@ -285,8 +285,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
 
        if (i > 0) {
                int cmlen = CMSG_COMPAT_LEN(i * sizeof(int));
-               if (!err)
-                       err = put_user(SOL_SOCKET, &cm->cmsg_level);
+               err = put_user(SOL_SOCKET, &cm->cmsg_level);
                if (!err)
                        err = put_user(SCM_RIGHTS, &cm->cmsg_type);
                if (!err)
index 4d891beab13899cd96c74b8a6a88b5ef5d6aba43..81c426adcd1ec66b760f1447d6cd9dfe4402e574 100644 (file)
@@ -3502,8 +3502,6 @@ static int __init net_dev_init(void)
 
        BUG_ON(!dev_boot_phase);
 
-       net_random_init();
-
        if (dev_proc_init())
                goto out;
 
index f23e7e38654319682cd2f92d8feadf5c172615f1..b16d31ae5e54db47078a2f6039e1a776e6a02578 100644 (file)
@@ -85,6 +85,14 @@ static void flow_cache_new_hashrnd(unsigned long arg)
        add_timer(&flow_hash_rnd_timer);
 }
 
+static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+{
+       if (fle->object)
+               atomic_dec(fle->object_ref);
+       kmem_cache_free(flow_cachep, fle);
+       flow_count(cpu)--;
+}
+
 static void __flow_cache_shrink(int cpu, int shrink_to)
 {
        struct flow_cache_entry *fle, **flp;
@@ -100,10 +108,7 @@ static void __flow_cache_shrink(int cpu, int shrink_to)
                }
                while ((fle = *flp) != NULL) {
                        *flp = fle->next;
-                       if (fle->object)
-                               atomic_dec(fle->object_ref);
-                       kmem_cache_free(flow_cachep, fle);
-                       flow_count(cpu)--;
+                       flow_entry_kill(cpu, fle);
                }
        }
 }
@@ -220,24 +225,33 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 
 nocache:
        {
+               int err;
                void *obj;
                atomic_t *obj_ref;
 
-               resolver(key, family, dir, &obj, &obj_ref);
+               err = resolver(key, family, dir, &obj, &obj_ref);
 
                if (fle) {
-                       fle->genid = atomic_read(&flow_cache_genid);
-
-                       if (fle->object)
-                               atomic_dec(fle->object_ref);
-
-                       fle->object = obj;
-                       fle->object_ref = obj_ref;
-                       if (obj)
-                               atomic_inc(fle->object_ref);
+                       if (err) {
+                               /* Force security policy check on next lookup */
+                               *head = fle->next;
+                               flow_entry_kill(cpu, fle);
+                       } else {
+                               fle->genid = atomic_read(&flow_cache_genid);
+
+                               if (fle->object)
+                                       atomic_dec(fle->object_ref);
+
+                               fle->object = obj;
+                               fle->object_ref = obj_ref;
+                               if (obj)
+                                       atomic_inc(fle->object_ref);
+                       }
                }
                local_bh_enable();
 
+               if (err)
+                       obj = ERR_PTR(err);
                return obj;
        }
 }
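/*
 * Editorial sketch of a caller on the new error path (hypothetical;
 * the real consumer is the xfrm policy lookup): flow_cache_lookup()
 * now returns ERR_PTR(err) when the resolver fails, so results must
 * be IS_ERR-checked before use.
 */
static int example_lookup(struct flowi *fl, u16 family, u8 dir,
			  flow_resolve_t resolver)
{
	void *obj = flow_cache_lookup(fl, family, dir, resolver);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* e.g. -EACCES from the resolver */
	/* ... use obj ... */
	return 0;
}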
index 221e4038216b8da1e61a67b32087eb146b02af3c..02f3c794789815e5a39717e3caf6eb3119c035e9 100644 (file)
@@ -602,7 +602,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
                goto errout;
        }
 
-       err = rtnl_unicast(skb, NETLINK_CB(skb).pid);
+       err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
 errout:
        kfree(iw_buf);
        dev_put(dev);
index 649d01ef35b6dc4dca5f3b74b593e7f5bfc272d2..271cf060ef8c69fb79f37768972af65b5457d835 100644 (file)
@@ -245,8 +245,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
        if (i > 0)
        {
                int cmlen = CMSG_LEN(i*sizeof(int));
-               if (!err)
-                       err = put_user(SOL_SOCKET, &cm->cmsg_level);
+               err = put_user(SOL_SOCKET, &cm->cmsg_level);
                if (!err)
                        err = put_user(SCM_RIGHTS, &cm->cmsg_type);
                if (!err)
index 94c5d761c830e3a156501b9e0320ec16929d5564..d93fe64f6693b3e23624c27741e1d0ffa167663e 100644 (file)
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-/*
-  This is a maximally equidistributed combined Tausworthe generator
-  based on code from GNU Scientific Library 1.5 (30 Jun 2004)
-
-   x_n = (s1_n ^ s2_n ^ s3_n) 
-
-   s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
-   s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
-   s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
-
-   The period of this generator is about 2^88.
-
-   From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
-   Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
-
-   This is available on the net from L'Ecuyer's home page,
-
-   http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
-   ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps 
-
-   There is an erratum in the paper "Tables of Maximally
-   Equidistributed Combined LFSR Generators", Mathematics of
-   Computation, 68, 225 (1999), 261--269:
-   http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
-
-        ... the k_j most significant bits of z_j must be non-
-        zero, for each j. (Note: this restriction also applies to the 
-        computer code given in [4], but was mistakenly not mentioned in
-        that paper.)
-   
-   This affects the seeding procedure by imposing the requirement
-   s1 > 1, s2 > 7, s3 > 15.
-
-*/
-struct nrnd_state {
-       u32 s1, s2, s3;
-};
-
-static DEFINE_PER_CPU(struct nrnd_state, net_rand_state);
-
-static u32 __net_random(struct nrnd_state *state)
-{
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
-       state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
-       state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
-       state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
-
-       return (state->s1 ^ state->s2 ^ state->s3);
-}
-
-static void __net_srandom(struct nrnd_state *state, unsigned long s)
-{
-       if (s == 0)
-               s = 1;      /* default seed is 1 */
-
-#define LCG(n) (69069 * n)
-       state->s1 = LCG(s);
-       state->s2 = LCG(state->s1);
-       state->s3 = LCG(state->s2);
-
-       /* "warm it up" */
-       __net_random(state);
-       __net_random(state);
-       __net_random(state);
-       __net_random(state);
-       __net_random(state);
-       __net_random(state);
-}
-
-
-unsigned long net_random(void)
-{
-       unsigned long r;
-       struct nrnd_state *state = &get_cpu_var(net_rand_state);
-       r = __net_random(state);
-       put_cpu_var(state);
-       return r;
-}
-
-
-void net_srandom(unsigned long entropy)
-{
-       struct nrnd_state *state = &get_cpu_var(net_rand_state);
-       __net_srandom(state, state->s1^entropy);
-       put_cpu_var(state);
-}
-
-void __init net_random_init(void)
-{
-       int i;
-
-       for_each_possible_cpu(i) {
-               struct nrnd_state *state = &per_cpu(net_rand_state,i);
-               __net_srandom(state, i+jiffies);
-       }
-}
-
-static int net_random_reseed(void)
-{
-       int i;
-       unsigned long seed;
-
-       for_each_possible_cpu(i) {
-               struct nrnd_state *state = &per_cpu(net_rand_state,i);
-
-               get_random_bytes(&seed, sizeof(seed));
-               __net_srandom(state, seed);
-       }
-       return 0;
-}
-late_initcall(net_random_reseed);
-
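/*
 * Editorial note, hedged: in the corresponding kernel series this
 * Tausworthe generator was moved to lib/random32.c rather than dropped,
 * with the net helpers reduced to thin wrappers, roughly:
 *
 *	#define net_random()		random32()
 *	#define net_srandom(seed)	srandom32((u32)(seed))
 */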
 int net_msg_cost = 5*HZ;
 int net_msg_burst = 10;
 
@@ -153,10 +40,7 @@ int net_ratelimit(void)
 {
        return __printk_ratelimit(net_msg_cost, net_msg_burst);
 }
-
-EXPORT_SYMBOL(net_random);
 EXPORT_SYMBOL(net_ratelimit);
-EXPORT_SYMBOL(net_srandom);
 
 /*
  * Convert an ASCII string to binary IP.
index bf692c1c116f69a1384d7124c03212ee2d504ebb..7e746c4c1688f41293ec6f67dcc512092854260c 100644 (file)
@@ -311,7 +311,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
        }
 
        if (sk->sk_state == DCCP_TIME_WAIT) {
-               inet_twsk_put((struct inet_timewait_sock *)sk);
+               inet_twsk_put(inet_twsk(sk));
                return;
        }
 
@@ -614,7 +614,7 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
                        bh_lock_sock(nsk);
                        return nsk;
                }
-               inet_twsk_put((struct inet_timewait_sock *)nsk);
+               inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }
 
@@ -980,7 +980,7 @@ discard_and_relse:
        goto discard_it;
 
 do_time_wait:
-       inet_twsk_put((struct inet_timewait_sock *)sk);
+       inet_twsk_put(inet_twsk(sk));
        goto no_dccp_socket;
 }
 
index 7a47399cf31fd81817c83b41894d4184a88481e4..7171a78671aa9b0e73732db88e5bee406b5ec445 100644 (file)
@@ -285,7 +285,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        }
 
        if (sk->sk_state == DCCP_TIME_WAIT) {
-               inet_twsk_put((struct inet_timewait_sock *)sk);
+               inet_twsk_put(inet_twsk(sk));
                return;
        }
 
@@ -663,7 +663,7 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
                        bh_lock_sock(nsk);
                        return nsk;
                }
-               inet_twsk_put((struct inet_timewait_sock *)nsk);
+               inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }
 
@@ -1109,7 +1109,7 @@ discard_and_relse:
        goto discard_it;
 
 do_time_wait:
-       inet_twsk_put((struct inet_timewait_sock *)sk);
+       inet_twsk_put(inet_twsk(sk));
        goto no_dccp_socket;
 }
 
index 70e027375682cdf52030314be662ed7477b4e888..3456cd331835c738286d6a2cf6c67a5de8aee16b 100644 (file)
@@ -1178,8 +1178,10 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
        if (peer) {
                if ((sock->state != SS_CONNECTED && 
                     sock->state != SS_CONNECTING) && 
-                   scp->accept_mode == ACC_IMMED)
+                   scp->accept_mode == ACC_IMMED) {
+                       release_sock(sk);
                        return -ENOTCONN;
+               }
 
                memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
        } else {
index dd0761e3d280cc311cbdf88b278b5e91cd4951ab..491429ce9394cd25ca5166eed6a8c4f8f70350bc 100644 (file)
@@ -267,9 +267,14 @@ static void dn_dst_link_failure(struct sk_buff *skb)
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-       return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
-               fl1->oif == fl2->oif &&
-               fl1->iif == fl2->iif;
+       return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
+               (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
+#ifdef CONFIG_DECNET_ROUTE_FWMARK
+               (fl1->nl_u.dn_u.fwmark ^ fl2->nl_u.dn_u.fwmark) |
+#endif
+               (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
+               (fl1->oif ^ fl2->oif) |
+               (fl1->iif ^ fl2->iif)) == 0;
 }
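/*
 * Editorial sketch of the comparison idiom above (hypothetical struct):
 * XOR of equal values is zero, so OR-ing the XORs of every field of
 * interest is zero exactly when all fields match.  Unlike memcmp()
 * over the whole flowi union, this never reads padding or unrelated
 * union members.
 */
struct example_key {
	u32 daddr, saddr;
	int oif, iif;
};

static inline int example_keys_equal(const struct example_key *a,
				     const struct example_key *b)
{
	return ((a->daddr ^ b->daddr) |
		(a->saddr ^ b->saddr) |
		(u32)(a->oif ^ b->oif) |
		(u32)(a->iif ^ b->iif)) == 0;
}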
 
 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
index a8e2e879a64764c31bbcc2626cb6779a5aba04fb..e2077a3aa8c097156c34e4e12fd1f4a36320af08 100644 (file)
@@ -43,6 +43,7 @@
 #include <net/tcp.h>
 #include <net/netlabel.h>
 #include <net/cipso_ipv4.h>
+#include <asm/atomic.h>
 #include <asm/bug.h>
 
 struct cipso_v4_domhsh_entry {
@@ -79,7 +80,7 @@ struct cipso_v4_map_cache_entry {
        unsigned char *key;
        size_t key_len;
 
-       struct netlbl_lsm_cache lsm_data;
+       struct netlbl_lsm_cache *lsm_data;
 
        u32 activity;
        struct list_head list;
@@ -188,13 +189,14 @@ static void cipso_v4_doi_domhsh_free(struct rcu_head *entry)
  * @entry: the entry to free
  *
  * Description:
- * This function frees the memory associated with a cache entry.
+ * This function frees the memory associated with a cache entry including the
+ * LSM cache data if there are no longer any users, i.e. reference count == 0.
  *
  */
 static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
 {
-       if (entry->lsm_data.free)
-               entry->lsm_data.free(entry->lsm_data.data);
+       if (entry->lsm_data)
+               netlbl_secattr_cache_free(entry->lsm_data);
        kfree(entry->key);
        kfree(entry);
 }
@@ -315,8 +317,8 @@ static int cipso_v4_cache_check(const unsigned char *key,
                    entry->key_len == key_len &&
                    memcmp(entry->key, key, key_len) == 0) {
                        entry->activity += 1;
-                       secattr->cache.free = entry->lsm_data.free;
-                       secattr->cache.data = entry->lsm_data.data;
+                       atomic_inc(&entry->lsm_data->refcount);
+                       secattr->cache = entry->lsm_data;
                        if (prev_entry == NULL) {
                                spin_unlock_bh(&cipso_v4_cache[bkt].lock);
                                return 0;
@@ -383,8 +385,8 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
        memcpy(entry->key, cipso_ptr, cipso_ptr_len);
        entry->key_len = cipso_ptr_len;
        entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
-       entry->lsm_data.free = secattr->cache.free;
-       entry->lsm_data.data = secattr->cache.data;
+       atomic_inc(&secattr->cache->refcount);
+       entry->lsm_data = secattr->cache;
 
        bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
        spin_lock_bh(&cipso_v4_cache[bkt].lock);
@@ -771,13 +773,15 @@ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
 {
        int cat = -1;
        u32 bitmap_len_bits = bitmap_len * 8;
-       u32 cipso_cat_size = doi_def->map.std->cat.cipso_size;
-       u32 *cipso_array = doi_def->map.std->cat.cipso;
+       u32 cipso_cat_size;
+       u32 *cipso_array;
 
        switch (doi_def->type) {
        case CIPSO_V4_MAP_PASS:
                return 0;
        case CIPSO_V4_MAP_STD:
+               cipso_cat_size = doi_def->map.std->cat.cipso_size;
+               cipso_array = doi_def->map.std->cat.cipso;
                for (;;) {
                        cat = cipso_v4_bitmap_walk(bitmap,
                                                   bitmap_len_bits,
@@ -823,19 +827,21 @@ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
        u32 net_spot_max = 0;
        u32 host_clen_bits = host_cat_len * 8;
        u32 net_clen_bits = net_cat_len * 8;
-       u32 host_cat_size = doi_def->map.std->cat.local_size;
-       u32 *host_cat_array = doi_def->map.std->cat.local;
+       u32 host_cat_size;
+       u32 *host_cat_array;
 
        switch (doi_def->type) {
        case CIPSO_V4_MAP_PASS:
-               net_spot_max = host_cat_len - 1;
-               while (net_spot_max > 0 && host_cat[net_spot_max] == 0)
+               net_spot_max = host_cat_len;
+               while (net_spot_max > 0 && host_cat[net_spot_max - 1] == 0)
                        net_spot_max--;
                if (net_spot_max > net_cat_len)
                        return -EINVAL;
                memcpy(net_cat, host_cat, net_spot_max);
                return net_spot_max;
        case CIPSO_V4_MAP_STD:
+               host_cat_size = doi_def->map.std->cat.local_size;
+               host_cat_array = doi_def->map.std->cat.local;
                for (;;) {
                        host_spot = cipso_v4_bitmap_walk(host_cat,
                                                         host_clen_bits,
@@ -891,8 +897,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
        int net_spot = -1;
        u32 net_clen_bits = net_cat_len * 8;
        u32 host_clen_bits = host_cat_len * 8;
-       u32 net_cat_size = doi_def->map.std->cat.cipso_size;
-       u32 *net_cat_array = doi_def->map.std->cat.cipso;
+       u32 net_cat_size;
+       u32 *net_cat_array;
 
        switch (doi_def->type) {
        case CIPSO_V4_MAP_PASS:
@@ -901,6 +907,8 @@ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
                memcpy(host_cat, net_cat, net_cat_len);
                return net_cat_len;
        case CIPSO_V4_MAP_STD:
+               net_cat_size = doi_def->map.std->cat.cipso_size;
+               net_cat_array = doi_def->map.std->cat.cipso;
                for (;;) {
                        net_spot = cipso_v4_bitmap_walk(net_cat,
                                                        net_clen_bits,
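/*
 * Editorial sketch of the refcounting scheme introduced above
 * (hypothetical names): the map cache entry and the secattr now share
 * one heap-allocated LSM blob; each taker bumps the refcount and the
 * last put frees it, which is the contract netlbl_secattr_cache_free()
 * provides for netlbl_lsm_cache.
 */
struct example_blob {
	atomic_t refcount;
	void (*free)(void *data);
	void *data;
};

static void example_blob_get(struct example_blob *blob)
{
	atomic_inc(&blob->refcount);
}

static void example_blob_put(struct example_blob *blob)
{
	if (atomic_dec_and_test(&blob->refcount)) {
		if (blob->free)
			blob->free(blob->data);
		kfree(blob);
	}
}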
index 2b1a54b59c48c4f2a65d6f8838bbcfd567a71183..f072f3875af8dfd5c6505787230cc578d8085c1c 100644 (file)
@@ -94,10 +94,8 @@ int inet_peer_minttl = 120 * HZ;     /* TTL under high load: 120 sec */
 int inet_peer_maxttl = 10 * 60 * HZ;   /* usual time to live: 10 min */
 
 static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function.  */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-#define PEER_MAX_CLEANUP_WORK 30
+static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
 static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -340,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
        spin_lock_bh(&inet_peer_unused_lock);
        p = inet_peer_unused_head;
        if (p != NULL) {
-               if (time_after(p->dtime + ttl, jiffies)) {
+               __u32 delta = (__u32)jiffies - p->dtime;
+               if (delta < ttl) {
                        /* Do not prune fresh entries. */
                        spin_unlock_bh(&inet_peer_unused_lock);
                        return -1;
@@ -432,7 +431,7 @@ out_free:
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
-       int i;
+       unsigned long now = jiffies;
        int ttl;
 
        if (peer_total >= inet_peer_threshold)
@@ -441,7 +440,10 @@ static void peer_check_expire(unsigned long dummy)
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        peer_total / inet_peer_threshold * HZ;
-       for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+       while (!cleanup_once(ttl)) {
+               if (jiffies != now)
+                       break;
+       }
 
        /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
         * interval depending on the total number of entries (more entries,
@@ -455,3 +457,16 @@ static void peer_check_expire(unsigned long dummy)
                                peer_total / inet_peer_threshold * HZ;
        add_timer(&peer_periodic_timer);
 }
+
+void inet_putpeer(struct inet_peer *p)
+{
+       spin_lock_bh(&inet_peer_unused_lock);
+       if (atomic_dec_and_test(&p->refcnt)) {
+               p->unused_prevp = inet_peer_unused_tailp;
+               p->unused_next = NULL;
+               *inet_peer_unused_tailp = p;
+               inet_peer_unused_tailp = &p->unused_next;
+               p->dtime = (__u32)jiffies;
+       }
+       spin_unlock_bh(&inet_peer_unused_lock);
+}
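/*
 * Editorial sketch of the wraparound-safe age test used in
 * cleanup_once() above: unsigned subtraction yields the elapsed ticks
 * correctly even after jiffies wraps, as long as the true age stays
 * below 2^32 ticks, whereas the old time_after(p->dtime + ttl, jiffies)
 * form could misfire around the wrap.
 */
static inline int example_entry_is_fresh(__u32 stamp, __u32 ttl)
{
	__u32 age = (__u32)jiffies - stamp;

	return age < ttl;
}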
index f5fba051df3da84e7000241cb54ac82edc7babe6..d5b5dec075b81848d8345651872257285a0f361d 100644 (file)
@@ -611,8 +611,8 @@ static int ipgre_rcv(struct sk_buff *skb)
                 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
                 */
                if (flags == 0 &&
-                   skb->protocol == __constant_htons(ETH_P_WCCP)) {
-                       skb->protocol = __constant_htons(ETH_P_IP);
+                   skb->protocol == htons(ETH_P_WCCP)) {
+                       skb->protocol = htons(ETH_P_IP);
                        if ((*(h + offset) & 0xF0) != 0x40) 
                                offset += 4;
                }
index 17e1a687ab4553e76f53a40029c4f87599d49df9..0849f1cced13364b014ef787157e1d7c76995f4e 100644 (file)
@@ -1196,6 +1196,8 @@ err1:
 static void __exit arp_tables_fini(void)
 {
        nf_unregister_sockopt(&arpt_sockopts);
+       xt_unregister_target(&arpt_error_target);
+       xt_unregister_target(&arpt_standard_target);
        xt_proto_fini(NF_ARP);
 }
 
index 53b6dffea6c2174fcf49f30e7844010c015a3772..262d0d44ec1b5924aff8a3d7efe4ec6a1fbb2733 100644 (file)
@@ -44,13 +44,6 @@ MODULE_LICENSE("GPL");
 
 static char __initdata version[] = "0.90";
 
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-
 static inline int
 ctnetlink_dump_tuples_proto(struct sk_buff *skb, 
                            const struct ip_conntrack_tuple *tuple,
@@ -398,7 +391,6 @@ nfattr_failure:
 
 static int ctnetlink_done(struct netlink_callback *cb)
 {
-       DEBUGP("entered %s\n", __FUNCTION__);
        if (cb->args[1])
                ip_conntrack_put((struct ip_conntrack *)cb->args[1]);
        return 0;
@@ -411,9 +403,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        struct ip_conntrack_tuple_hash *h;
        struct list_head *i;
 
-       DEBUGP("entered %s, last bucket=%lu id=%u\n", __FUNCTION__, 
-                       cb->args[0], *id);
-
        read_lock_bh(&ip_conntrack_lock);
        last = (struct ip_conntrack *)cb->args[1];
        for (; cb->args[0] < ip_conntrack_htable_size; cb->args[0]++) {
@@ -452,7 +441,6 @@ out:
        if (last)
                ip_conntrack_put(last);
 
-       DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
        return skb->len;
 }
 
@@ -466,8 +454,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
 {
        struct nfattr *tb[CTA_IP_MAX];
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_IP_MAX, attr);
 
        if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip))
@@ -481,8 +467,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
                return -EINVAL;
        tuple->dst.ip = *(__be32 *)NFA_DATA(tb[CTA_IP_V4_DST-1]);
 
-       DEBUGP("leaving\n");
-
        return 0;
 }
 
@@ -503,8 +487,6 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,
        struct ip_conntrack_protocol *proto;
        int ret = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_PROTO_MAX, attr);
 
        if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
@@ -531,8 +513,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
        struct nfattr *tb[CTA_TUPLE_MAX];
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        memset(tuple, 0, sizeof(*tuple));
 
        nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]);
@@ -557,10 +537,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
        else
                tuple->dst.dir = IP_CT_DIR_ORIGINAL;
 
-       DUMP_TUPLE(tuple);
-
-       DEBUGP("leaving\n");
-
        return 0;
 }
 
@@ -577,8 +553,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
        struct nfattr *tb[CTA_PROTONAT_MAX];
        struct ip_nat_protocol *npt;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr);
 
        if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
@@ -597,7 +571,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
 
        ip_nat_proto_put(npt);
 
-       DEBUGP("leaving\n");
        return 0;
 }
 
@@ -613,8 +586,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
        struct nfattr *tb[CTA_NAT_MAX];
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        memset(range, 0, sizeof(*range));
        
        nfattr_parse_nested(tb, CTA_NAT_MAX, nat);
@@ -640,7 +611,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
        if (err < 0)
                return err;
 
-       DEBUGP("leaving\n");
        return 0;
 }
 #endif
@@ -650,8 +620,6 @@ ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
 {
        struct nfattr *tb[CTA_HELP_MAX];
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_HELP_MAX, attr);
 
        if (!tb[CTA_HELP_NAME-1])
@@ -679,8 +647,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct ip_conntrack *ct;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nfattr_bad_size(cda, CTA_MAX, cta_min))
                return -EINVAL;
 
@@ -698,10 +664,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        h = ip_conntrack_find_get(&tuple, NULL);
-       if (!h) {
-               DEBUGP("tuple not found in conntrack hash\n");
+       if (!h)
                return -ENOENT;
-       }
 
        ct = tuplehash_to_ctrack(h);
        
@@ -716,7 +680,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                ct->timeout.function((unsigned long)ct);
 
        ip_conntrack_put(ct);
-       DEBUGP("leaving\n");
 
        return 0;
 }
@@ -731,8 +694,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct sk_buff *skb2 = NULL;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct nfgenmsg *msg = NLMSG_DATA(nlh);
                u32 rlen;
@@ -770,11 +731,9 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        h = ip_conntrack_find_get(&tuple, NULL);
-       if (!h) {
-               DEBUGP("tuple not found in conntrack hash");
+       if (!h)
                return -ENOENT;
-       }
-       DEBUGP("tuple found\n");
+
        ct = tuplehash_to_ctrack(h);
 
        err = -ENOMEM;
@@ -795,7 +754,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                goto out;
 
-       DEBUGP("leaving\n");
        return 0;
 
 free:
@@ -866,8 +824,6 @@ ctnetlink_change_helper(struct ip_conntrack *ct, struct nfattr *cda[])
        char *helpname;
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        /* don't change helper of sibling connections */
        if (ct->master)
                return -EINVAL;
@@ -938,8 +894,6 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
 {
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (cda[CTA_HELP-1]) {
                err = ctnetlink_change_helper(ct, cda);
                if (err < 0)
@@ -969,7 +923,6 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
                ct->mark = ntohl(*(__be32 *)NFA_DATA(cda[CTA_MARK-1]));
 #endif
 
-       DEBUGP("all done\n");
        return 0;
 }
 
@@ -981,8 +934,6 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
        struct ip_conntrack *ct;
        int err = -EINVAL;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        ct = ip_conntrack_alloc(otuple, rtuple);
        if (ct == NULL || IS_ERR(ct))
                return -ENOMEM; 
@@ -1017,7 +968,6 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
        if (ct->helper)
                ip_conntrack_helper_put(ct->helper);
 
-       DEBUGP("conntrack with id %u inserted\n", ct->id);
        return 0;
 
 err:   
@@ -1033,8 +983,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
        struct ip_conntrack_tuple_hash *h = NULL;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nfattr_bad_size(cda, CTA_MAX, cta_min))
                return -EINVAL;
 
@@ -1058,7 +1006,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
        if (h == NULL) {
                write_unlock_bh(&ip_conntrack_lock);
-               DEBUGP("no such conntrack, create new\n");
                err = -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_CREATE)
                        err = ctnetlink_create_conntrack(cda, &otuple, &rtuple);
@@ -1074,7 +1021,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
        /* We manipulate the conntrack inside the global conntrack table lock,
         * so there's no need to increase the refcount */
-       DEBUGP("conntrack found\n");
        err = -EEXIST;
        if (!(nlh->nlmsg_flags & NLM_F_EXCL))
                err = ctnetlink_change_conntrack(tuplehash_to_ctrack(h), cda);
@@ -1249,8 +1195,6 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        struct list_head *i;
        u_int32_t *id = (u_int32_t *) &cb->args[0];
 
-       DEBUGP("entered %s, last id=%llu\n", __FUNCTION__, *id);
-
        read_lock_bh(&ip_conntrack_lock);
        list_for_each_prev(i, &ip_conntrack_expect_list) {
                exp = (struct ip_conntrack_expect *) i;
@@ -1266,8 +1210,6 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 out:   
        read_unlock_bh(&ip_conntrack_lock);
 
-       DEBUGP("leaving, last id=%llu\n", *id);
-
        return skb->len;
 }
 
@@ -1285,8 +1227,6 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
        struct sk_buff *skb2;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
                return -EINVAL;
 
@@ -1437,8 +1377,6 @@ ctnetlink_create_expect(struct nfattr *cda[])
        struct ip_conntrack *ct;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        /* caller guarantees that those three CTA_EXPECT_* exist */
        err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE);
        if (err < 0)
@@ -1490,8 +1428,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        struct ip_conntrack_expect *exp;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);   
-
        if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
                return -EINVAL;
 
@@ -1520,8 +1456,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
                err = ctnetlink_change_expect(exp, cda);
        write_unlock_bh(&ip_conntrack_lock);
 
-       DEBUGP("leaving\n");
-       
        return err;
 }
 
index 12a818a2462f2a8caf9f8fea514d401fd1d7311b..1aa4517fbcdb454f02e70933484cae5a9b4b2382 100644 (file)
@@ -28,7 +28,7 @@ static inline int
 set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
 {
        struct iphdr *iph = (*pskb)->nh.iph;
-       __be16 oldtos;
+       u_int16_t oldtos;
 
        if ((iph->tos & IPT_ECN_IP_MASK) != (einfo->ip_ect & IPT_ECN_IP_MASK)) {
                if (!skb_make_writable(pskb, sizeof(struct iphdr)))
@@ -37,8 +37,8 @@ set_ect_ip(struct sk_buff **pskb, const struct ipt_ECN_info *einfo)
                oldtos = iph->tos;
                iph->tos &= ~IPT_ECN_IP_MASK;
                iph->tos |= (einfo->ip_ect & IPT_ECN_IP_MASK);
-               iph->check = nf_csum_update(oldtos ^ htons(0xFFFF), iph->tos,
-                                           iph->check);
+               iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF),
+                                           htons(iph->tos), iph->check);
        } 
        return 1;
 }
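/*
 * Editorial sketch of the incremental checksum rule (RFC 1624, eqn. 3)
 * behind nf_csum_update() above: when one 16-bit field changes from old
 * to new, the header checksum is adjusted instead of being recomputed.
 * All three values must be in network byte order, which is what the
 * htons() fix above restores.
 *
 *	check' = ~( ~check + ~old + new )
 */
static inline u16 example_csum_update(u16 check, u16 old_val, u16 new_val)
{
	u32 sum = (u16)~check;

	sum += (u16)~old_val;
	sum += new_val;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;
}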
index 6b8b14ccc3d3ff9a0fa0610a0e08df6d274cf2ef..83b80b3a5d2f1de5b5aebd891ae164c78771288c 100644 (file)
@@ -30,7 +30,7 @@ target(struct sk_buff **pskb,
 {
        const struct ipt_tos_target_info *tosinfo = targinfo;
        struct iphdr *iph = (*pskb)->nh.iph;
-       __be16 oldtos;
+       u_int16_t oldtos;
 
        if ((iph->tos & IPTOS_TOS_MASK) != tosinfo->tos) {
                if (!skb_make_writable(pskb, sizeof(struct iphdr)))
@@ -38,8 +38,8 @@ target(struct sk_buff **pskb,
                iph = (*pskb)->nh.iph;
                oldtos = iph->tos;
                iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
-               iph->check = nf_csum_update(oldtos ^ htons(0xFFFF), iph->tos,
-                                           iph->check);
+               iph->check = nf_csum_update(htons(oldtos) ^ htons(0xFFFF),
+                                           htons(iph->tos), iph->check);
        }
        return IPT_CONTINUE;
 }
index c41ddba02e9d3553dac0f4aca6d7c5c8b9975584..925ee4dfc32c15039e68ddc47274239833f9cab9 100644 (file)
@@ -566,9 +566,15 @@ static inline u32 rt_score(struct rtable *rt)
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-       return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 &&
-              fl1->oif     == fl2->oif &&
-              fl1->iif     == fl2->iif;
+       return ((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+               (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+#ifdef CONFIG_IP_ROUTE_FWMARK
+               (fl1->nl_u.ip4_u.fwmark ^ fl2->nl_u.ip4_u.fwmark) |
+#endif
+               (*(u16 *)&fl1->nl_u.ip4_u.tos ^
+                *(u16 *)&fl2->nl_u.ip4_u.tos) |
+               (fl1->oif ^ fl2->oif) |
+               (fl1->iif ^ fl2->iif)) == 0;
 }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
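
The rewritten compare_keys() trades memcmp() plus short-circuited field tests for one branch-free expression: XOR each field pair (non-zero exactly when the pair differs), OR the partial results together, and test the whole thing against zero once. The idiom in isolation, on a hypothetical key struct:

    #include <stdint.h>

    struct flow_key {                           /* hypothetical fields */
            uint32_t daddr, saddr;
            int32_t  oif, iif;
    };

    /* Each XOR is non-zero iff that field pair differs; OR-ing the
     * results yields zero only when every field matches, so a single
     * comparison replaces several conditional branches. */
    static int flow_keys_equal(const struct flow_key *a, const struct flow_key *b)
    {
            return ((a->daddr ^ b->daddr) |
                    (a->saddr ^ b->saddr) |
                    (uint32_t)(a->oif ^ b->oif) |
                    (uint32_t)(a->iif ^ b->iif)) == 0;
    }
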
index c83938b8fcb1201ddf403c6423cd83790397595b..6bbd98575172b0f140897c5be3cec7d46b42fcdf 100644 (file)
@@ -355,7 +355,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
-               inet_twsk_put((struct inet_timewait_sock *)sk);
+               inet_twsk_put(inet_twsk(sk));
                return;
        }
 
@@ -578,7 +578,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
        struct tcphdr *th = skb->h.th;
        struct {
                struct tcphdr th;
-               u32 tsopt[3];
+               u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
        } rep;
        struct ip_reply_arg arg;
 
@@ -960,7 +960,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
                        bh_lock_sock(nsk);
                        return nsk;
                }
-               inet_twsk_put((struct inet_timewait_sock *)nsk);
+               inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }
 
@@ -1154,26 +1154,24 @@ discard_and_relse:
 
 do_time_wait:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-               inet_twsk_put((struct inet_timewait_sock *) sk);
+               inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
 
        if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
-               inet_twsk_put((struct inet_timewait_sock *) sk);
+               inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
-       switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
-                                          skb, th)) {
+       switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN: {
                struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo,
                                                        skb->nh.iph->daddr,
                                                        th->dest,
                                                        inet_iif(skb));
                if (sk2) {
-                       inet_twsk_deschedule((struct inet_timewait_sock *)sk,
-                                            &tcp_death_row);
-                       inet_twsk_put((struct inet_timewait_sock *)sk);
+                       inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
+                       inet_twsk_put(inet_twsk(sk));
                        sk = sk2;
                        goto process;
                }
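
The tcp_v4 hunks above replace open-coded (struct inet_timewait_sock *) casts with the inet_twsk() accessor, and size tsopt[] from TCPOLEN_TSTAMP_ALIGNED instead of a magic 3, so the buffer tracks the option length it actually holds. Centralizing a layout-dependent downcast in one inline helper keeps the assumption in a single place; roughly:

    static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
    {
            /* valid only while sk is known to be in TCP_TIME_WAIT */
            return (struct inet_timewait_sock *)sk;
    }
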
index 9a253faefc81c9c2f95d82e9cbb7878d677b25a1..f22536e32cb117fa64b561d39bbf365de95966f4 100644 (file)
@@ -273,10 +273,10 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
                                         __u32 tstamp)
 {
        if (tp->rx_opt.tstamp_ok) {
-               *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
-                                         (TCPOPT_NOP << 16) |
-                                         (TCPOPT_TIMESTAMP << 8) |
-                                         TCPOLEN_TIMESTAMP);
+               *ptr++ = htonl((TCPOPT_NOP << 24) |
+                              (TCPOPT_NOP << 16) |
+                              (TCPOPT_TIMESTAMP << 8) |
+                              TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);
                *ptr++ = htonl(tp->rx_opt.ts_recent);
        }
@@ -325,18 +325,27 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
        *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
        if (ts) {
                if(sack)
-                       *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
-                                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+                       *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
+                                      (TCPOLEN_SACK_PERM << 16) |
+                                      (TCPOPT_TIMESTAMP << 8) |
+                                      TCPOLEN_TIMESTAMP);
                else
-                       *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-                                                 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+                       *ptr++ = htonl((TCPOPT_NOP << 24) |
+                                      (TCPOPT_NOP << 16) |
+                                      (TCPOPT_TIMESTAMP << 8) |
+                                      TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);         /* TSVAL */
                *ptr++ = htonl(ts_recent);      /* TSECR */
        } else if(sack)
-               *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
-                                         (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
+               *ptr++ = htonl((TCPOPT_NOP << 24) |
+                              (TCPOPT_NOP << 16) |
+                              (TCPOPT_SACK_PERM << 8) |
+                              TCPOLEN_SACK_PERM);
        if (offer_wscale)
-               *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
+               *ptr++ = htonl((TCPOPT_NOP << 24) |
+                              (TCPOPT_WINDOW << 16) |
+                              (TCPOLEN_WINDOW << 8) |
+                              (wscale));
 }
 
 /* This routine actually transmits TCP packets queued in by
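
Dropping __constant_htonl() for plain htonl() is a no-op at runtime: the macro already constant-folds when its argument is a compile-time constant, so the spelled-out variant buys nothing. A userspace analogue building the same NOP/NOP/TIMESTAMP/length word (TCPOPT_NOP is 1, TCPOPT_TIMESTAMP 8, TCPOLEN_TIMESTAMP 10):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Constant argument: the byte swap is folded at build time,
             * no runtime conversion is emitted. */
            const uint32_t tsopt = htonl((1 << 24) | (1 << 16) | (8 << 8) | 10);

            printf("timestamp option prefix: 0x%08x\n", tsopt);
            return 0;
    }
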
index 7a7a00147e55c7d63758d2d3bad84884d3b1a4c6..1bed0cdf53e30c42f7d9f0ea82f2c75b59429d7f 100644 (file)
@@ -52,7 +52,7 @@ __xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
                    xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
                    xdst->u.rt.fl.fl4_src == fl->fl4_src &&
                    xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
-                   xfrm_bundle_ok(xdst, fl, AF_INET, 0)) {
+                   xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
                        dst_clone(dst);
                        break;
                }
index a460e8132b4d471c0b282f47c58996ddd4f35315..ef5eaad448518aa02061d888f0cf2e83a6621001 100644 (file)
@@ -153,6 +153,19 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
        ---help---
          Support for MIPv6 route optimization mode.
 
+config IPV6_SIT
+       tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
+       depends on IPV6
+       default y
+       ---help---
+         Tunneling means encapsulating data of one protocol type within
+         another protocol and sending it over a channel that understands the
+         encapsulating protocol. This driver implements encapsulation of IPv6
+         into IPv4 packets. This is useful if you want to connect two IPv6
+         networks over an IPv4-only path.
+
+         Saying M here will produce a module called sit.ko. If unsure, say Y.
+
 config IPV6_TUNNEL
        tristate "IPv6: IPv6-in-IPv6 tunnel"
        select INET6_TUNNEL
index 87274e47fe32736d7ec0261f44ef3d34ecd0ef64..addcc011bc01c2146577e0d1e2253cef64e0a93d 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_IPV6) += ipv6.o
 
-ipv6-objs :=   af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o sit.o \
+ipv6-objs :=   af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \
                route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o raw.o \
                protocol.o icmp.o mcast.o reassembly.o tcp_ipv6.o \
                exthdrs.o sysctl_net_ipv6.o datagram.o proc.o \
@@ -29,6 +29,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
 obj-$(CONFIG_NETFILTER)        += netfilter/
 
+obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 
 obj-y += exthdrs_core.o
index e03c33b2465bc72415c8e27dce382db233959a69..b312a5f7a759caa298d42d4ce6763baac0b9ba04 100644 (file)
@@ -396,8 +396,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
        ndev->regen_timer.data = (unsigned long) ndev;
        if ((dev->flags&IFF_LOOPBACK) ||
            dev->type == ARPHRD_TUNNEL ||
-           dev->type == ARPHRD_NONE ||
-           dev->type == ARPHRD_SIT) {
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+           dev->type == ARPHRD_SIT ||
+#endif
+           dev->type == ARPHRD_NONE) {
                printk(KERN_INFO
                       "%s: Disabled Privacy Extensions\n",
                       dev->name);
@@ -1546,8 +1548,10 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
           This thing is done here expecting that the whole
           class of non-broadcast devices need not cloning.
         */
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
        if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
                cfg.fc_flags |= RTF_NONEXTHOP;
+#endif
 
        ip6_route_add(&cfg);
 }
@@ -1569,6 +1573,7 @@ static void addrconf_add_mroute(struct net_device *dev)
        ip6_route_add(&cfg);
 }
 
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
 static void sit_route_add(struct net_device *dev)
 {
        struct fib6_config cfg = {
@@ -1582,6 +1587,7 @@ static void sit_route_add(struct net_device *dev)
        /* prefix length - 96 bits "::d.d.d.d" */
        ip6_route_add(&cfg);
 }
+#endif
 
 static void addrconf_add_lroute(struct net_device *dev)
 {
@@ -1852,6 +1858,7 @@ int addrconf_set_dstaddr(void __user *arg)
        if (dev == NULL)
                goto err_exit;
 
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
        if (dev->type == ARPHRD_SIT) {
                struct ifreq ifr;
                mm_segment_t    oldfs;
@@ -1881,6 +1888,7 @@ int addrconf_set_dstaddr(void __user *arg)
                        err = dev_open(dev);
                }
        }
+#endif
 
 err_exit:
        rtnl_unlock();
@@ -2010,6 +2018,7 @@ int addrconf_del_ifaddr(void __user *arg)
        return err;
 }
 
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
 static void sit_add_v4_addrs(struct inet6_dev *idev)
 {
        struct inet6_ifaddr * ifp;
@@ -2078,6 +2087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
                }
         }
 }
+#endif
 
 static void init_loopback(struct net_device *dev)
 {
@@ -2141,6 +2151,7 @@ static void addrconf_dev_config(struct net_device *dev)
                addrconf_add_linklocal(idev, &addr);
 }
 
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
 static void addrconf_sit_config(struct net_device *dev)
 {
        struct inet6_dev *idev;
@@ -2166,6 +2177,7 @@ static void addrconf_sit_config(struct net_device *dev)
        } else
                sit_route_add(dev);
 }
+#endif
 
 static inline int
 ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
@@ -2260,9 +2272,11 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                }
 
                switch(dev->type) {
+#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
                case ARPHRD_SIT:
                        addrconf_sit_config(dev);
                        break;
+#endif
                case ARPHRD_TUNNEL6:
                        addrconf_ip6_tnl_config(dev);
                        break;
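
All of these addrconf.c guards use the standard idiom for tristate options: Kconfig defines CONFIG_IPV6_SIT when the feature is built in (=y) and CONFIG_IPV6_SIT_MODULE when it is modular (=m), so testing both compiles the SIT paths out only when the feature is disabled entirely. The shape of the idiom, with a hypothetical symbol:

    #if defined(CONFIG_FEATURE_X) || defined(CONFIG_FEATURE_X_MODULE)
    void feature_x_setup(struct net_device *dev);       /* real implementation */
    #else
    static inline void feature_x_setup(struct net_device *dev)
    {
            /* feature off (=n): the call sites compile away entirely */
    }
    #endif
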
index e94eccb99707991c80409ebce8a157e1beaba3e7..858cae29581c8e129289ce09cf644fd5d70c9184 100644 (file)
@@ -850,7 +850,6 @@ static int __init inet6_init(void)
        err = addrconf_init();
        if (err)
                goto addrconf_fail;
-       sit_init();
 
        /* Init v6 extension headers. */
        ipv6_rthdr_init();
@@ -927,7 +926,6 @@ static void __exit inet6_exit(void)
        mip6_fini();
 #endif
        /* Cleanup code parts. */
-       sit_cleanup();
        ip6_flowlabel_cleanup();
        addrconf_cleanup();
        ip6_route_cleanup();
index d8c1057e8b008520f2f9c3a8e21e6de1bd90f757..1896ecb52899069a80b577748885b0d2489a6754 100644 (file)
@@ -117,12 +117,15 @@ static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
 {
        struct fib6_rule *r = (struct fib6_rule *) rule;
 
-       if (!ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen))
+       if (r->dst.plen &&
+           !ipv6_prefix_equal(&fl->fl6_dst, &r->dst.addr, r->dst.plen))
                return 0;
 
-       if ((flags & RT6_LOOKUP_F_HAS_SADDR) &&
-           !ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, r->src.plen))
-               return 0;
+       if (r->src.plen) {
+               if (!(flags & RT6_LOOKUP_F_HAS_SADDR) ||
+                   !ipv6_prefix_equal(&fl->fl6_src, &r->src.addr, r->src.plen))
+                       return 0;
+       }
 
        if (r->tclass && r->tclass != ((ntohl(fl->fl6_flowlabel) >> 20) & 0xff))
                return 0;
index 0304b5fe8d6aa01d77378d4ff939e8f5f04c1d4f..41a8a5f06602b2a98c36225925765783cb21d075 100644 (file)
@@ -967,8 +967,6 @@ static void ndisc_recv_na(struct sk_buff *skb)
                    ipv6_devconf.forwarding && ipv6_devconf.proxy_ndp &&
                    pneigh_lookup(&nd_tbl, &msg->target, dev, 0)) {
                        /* XXX: idev->cnf.proxy_ndp */
-                       WARN_ON(skb->dst != NULL &&
-                               ((struct rt6_info *)skb->dst)->rt6i_idev);
                        goto out;
                }
 
index d6b4b4f48d18cb6d9736c520b854d0899c98bc23..a1b0f075462e0e64fd76a91cd6696765e0f8b6e4 100644 (file)
@@ -529,13 +529,17 @@ struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
                .nl_u = {
                        .ip6_u = {
                                .daddr = *daddr,
-                               /* TODO: saddr */
                        },
                },
        };
        struct dst_entry *dst;
        int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
 
+       if (saddr) {
+               memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
+               flags |= RT6_LOOKUP_F_HAS_SADDR;
+       }
+
        dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_lookup);
        if (dst->error == 0)
                return (struct rt6_info *) dst;
@@ -697,6 +701,7 @@ out2:
 void ip6_route_input(struct sk_buff *skb)
 {
        struct ipv6hdr *iph = skb->nh.ipv6h;
+       int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct flowi fl = {
                .iif = skb->dev->ifindex,
                .nl_u = {
@@ -711,7 +716,9 @@ void ip6_route_input(struct sk_buff *skb)
                },
                .proto = iph->nexthdr,
        };
-       int flags = rt6_need_strict(&iph->daddr) ? RT6_LOOKUP_F_IFACE : 0;
+
+       if (rt6_need_strict(&iph->daddr))
+               flags |= RT6_LOOKUP_F_IFACE;
 
        skb->dst = fib6_rule_lookup(&fl, flags, ip6_pol_route_input);
 }
@@ -794,6 +801,9 @@ struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
        if (rt6_need_strict(&fl->fl6_dst))
                flags |= RT6_LOOKUP_F_IFACE;
 
+       if (!ipv6_addr_any(&fl->fl6_src))
+               flags |= RT6_LOOKUP_F_HAS_SADDR;
+
        return fib6_rule_lookup(fl, flags, ip6_pol_route_output);
 }
 
@@ -1345,6 +1355,7 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
                                           struct in6_addr *gateway,
                                           struct net_device *dev)
 {
+       int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip6rd_flowi rdfl = {
                .fl = {
                        .oif = dev->ifindex,
@@ -1357,7 +1368,9 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
                },
                .gateway = *gateway,
        };
-       int flags = rt6_need_strict(dest) ? RT6_LOOKUP_F_IFACE : 0;
+
+       if (rt6_need_strict(dest))
+               flags |= RT6_LOOKUP_F_IFACE;
 
        return (struct rt6_info *)fib6_rule_lookup((struct flowi *)&rdfl, flags, __ip6_route_redirect);
 }
index 836eecd7e62bfaa100a616d5aeda26ecc652ada9..b481a4d780c239f229d9d28aae7c0239fc589f76 100644 (file)
@@ -850,3 +850,7 @@ int __init sit_init(void)
        inet_del_protocol(&sit_protocol, IPPROTO_IPV6);
        goto out;
 }
+
+module_init(sit_init);
+module_exit(sit_cleanup);
+MODULE_LICENSE("GPL");
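
Since sit.o is now built via obj-$(CONFIG_IPV6_SIT) rather than linked into ipv6.o unconditionally, and inet6_init()/inet6_exit() no longer call sit_init()/sit_cleanup() directly, the driver needs its own module plumbing. The minimal shape of what this hunk adds:

    #include <linux/init.h>
    #include <linux/module.h>

    static int __init example_init(void)
    {
            return 0;                   /* a negative errno aborts the load */
    }

    static void __exit example_exit(void)
    {
            /* undo everything example_init() set up */
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
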
index 3b6575478fcc381aae64ee2c0d7b2b791d5f1846..4c2a7c0cafef2db93c05e95f1345b02affca3ea9 100644 (file)
@@ -329,7 +329,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        }
 
        if (sk->sk_state == TCP_TIME_WAIT) {
-               inet_twsk_put((struct inet_timewait_sock *)sk);
+               inet_twsk_put(inet_twsk(sk));
                return;
        }
 
@@ -653,7 +653,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
        int tot_len = sizeof(struct tcphdr);
 
        if (ts)
-               tot_len += 3*4;
+               tot_len += TCPOLEN_TSTAMP_ALIGNED;
 
        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
@@ -749,7 +749,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
                        bh_lock_sock(nsk);
                        return nsk;
                }
-               inet_twsk_put((struct inet_timewait_sock *)nsk);
+               inet_twsk_put(inet_twsk(nsk));
                return NULL;
        }
 
@@ -1283,18 +1283,17 @@ discard_and_relse:
 
 do_time_wait:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-               inet_twsk_put((struct inet_timewait_sock *)sk);
+               inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
 
        if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
                TCP_INC_STATS_BH(TCP_MIB_INERRS);
-               inet_twsk_put((struct inet_timewait_sock *)sk);
+               inet_twsk_put(inet_twsk(sk));
                goto discard_it;
        }
 
-       switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
-                                          skb, th)) {
+       switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
        case TCP_TW_SYN:
        {
                struct sock *sk2;
index 6a252e2134d11cf8629d64c3dc56592f6625fec8..73cee2ec07e8ed22fcba162427ce91622e5d7f59 100644 (file)
@@ -73,7 +73,7 @@ __xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
                                 xdst->u.rt6.rt6i_src.plen);
                if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) &&
                    ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) &&
-                   xfrm_bundle_ok(xdst, fl, AF_INET6,
+                   xfrm_bundle_ok(policy, xdst, fl, AF_INET6,
                                   (xdst->u.rt6.rt6i_dst.plen != 128 ||
                                    xdst->u.rt6.rt6i_src.plen != 128))) {
                        dst_clone(dst);
index ff98e70b0931f8e74cfc14c3d9b9337b50c12e41..20ff7cca1d070e156e4d18dca77cd33991c79992 100644 (file)
@@ -2928,11 +2928,6 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
                if (*dir)
                        goto out;
        }
-       else {
-               *dir = security_xfrm_sock_policy_alloc(xp, sk);
-               if (*dir)
-                       goto out;
-       }
 
        *dir = pol->sadb_x_policy_dir-1;
        return xp;
index ce94732b8e231d68c7cda461f5168103cf8b45f8..f619c6527266255427cacff4c0acb89025c82755 100644 (file)
@@ -209,7 +209,9 @@ config NETFILTER_XT_TARGET_SECMARK
 
 config NETFILTER_XT_TARGET_CONNSECMARK
        tristate '"CONNSECMARK" target support'
-       depends on NETFILTER_XTABLES && (NF_CONNTRACK_SECMARK || IP_NF_CONNTRACK_SECMARK)
+       depends on NETFILTER_XTABLES && \
+                  ((NF_CONNTRACK && NF_CONNTRACK_SECMARK) || \
+                   (IP_NF_CONNTRACK && IP_NF_CONNTRACK_SECMARK))
        help
          The CONNSECMARK target copies security markings from packets
          to connections, and restores security markings from connections
index 1721f7c78c77b5dcc5ca59d3a82962d7230ee29a..bd0156a28ecdbb1c90b1f927d0d5520e4a8984c5 100644 (file)
@@ -47,13 +47,6 @@ MODULE_LICENSE("GPL");
 
 static char __initdata version[] = "0.93";
 
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-
 static inline int
 ctnetlink_dump_tuples_proto(struct sk_buff *skb, 
                            const struct nf_conntrack_tuple *tuple,
@@ -410,7 +403,6 @@ static int ctnetlink_done(struct netlink_callback *cb)
 {
        if (cb->args[1])
                nf_ct_put((struct nf_conn *)cb->args[1]);
-       DEBUGP("entered %s\n", __FUNCTION__);
        return 0;
 }
 
@@ -425,9 +417,6 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
        u_int8_t l3proto = nfmsg->nfgen_family;
 
-       DEBUGP("entered %s, last bucket=%lu id=%u\n", __FUNCTION__, 
-                       cb->args[0], *id);
-
        read_lock_bh(&nf_conntrack_lock);
        last = (struct nf_conn *)cb->args[1];
        for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
@@ -471,7 +460,6 @@ out:
        if (last)
                nf_ct_put(last);
 
-       DEBUGP("leaving, last bucket=%lu id=%u\n", cb->args[0], *id);
        return skb->len;
 }
 
@@ -482,8 +470,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct nf_conntrack_tuple *tuple)
        struct nf_conntrack_l3proto *l3proto;
        int ret = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_IP_MAX, attr);
 
        l3proto = nf_ct_l3proto_find_get(tuple->src.l3num);
@@ -493,8 +479,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct nf_conntrack_tuple *tuple)
 
        nf_ct_l3proto_put(l3proto);
 
-       DEBUGP("leaving\n");
-
        return ret;
 }
 
@@ -510,8 +494,6 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,
        struct nf_conntrack_protocol *proto;
        int ret = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_PROTO_MAX, attr);
 
        if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
@@ -538,8 +520,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct nf_conntrack_tuple *tuple,
        struct nfattr *tb[CTA_TUPLE_MAX];
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        memset(tuple, 0, sizeof(*tuple));
 
        nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]);
@@ -566,10 +546,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct nf_conntrack_tuple *tuple,
        else
                tuple->dst.dir = IP_CT_DIR_ORIGINAL;
 
-       NF_CT_DUMP_TUPLE(tuple);
-
-       DEBUGP("leaving\n");
-
        return 0;
 }
 
@@ -586,8 +562,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
        struct nfattr *tb[CTA_PROTONAT_MAX];
        struct ip_nat_protocol *npt;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr);
 
        if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
@@ -606,7 +580,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
 
        ip_nat_proto_put(npt);
 
-       DEBUGP("leaving\n");
        return 0;
 }
 
@@ -622,8 +595,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
        struct nfattr *tb[CTA_NAT_MAX];
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        memset(range, 0, sizeof(*range));
        
        nfattr_parse_nested(tb, CTA_NAT_MAX, nat);
@@ -649,7 +620,6 @@ ctnetlink_parse_nat(struct nfattr *nat,
        if (err < 0)
                return err;
 
-       DEBUGP("leaving\n");
        return 0;
 }
 #endif
@@ -659,8 +629,6 @@ ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
 {
        struct nfattr *tb[CTA_HELP_MAX];
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        nfattr_parse_nested(tb, CTA_HELP_MAX, attr);
 
        if (!tb[CTA_HELP_NAME-1])
@@ -690,8 +658,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
        u_int8_t u3 = nfmsg->nfgen_family;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nfattr_bad_size(cda, CTA_MAX, cta_min))
                return -EINVAL;
 
@@ -709,10 +675,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        h = nf_conntrack_find_get(&tuple, NULL);
-       if (!h) {
-               DEBUGP("tuple not found in conntrack hash\n");
+       if (!h)
                return -ENOENT;
-       }
 
        ct = nf_ct_tuplehash_to_ctrack(h);
        
@@ -727,7 +691,6 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
                ct->timeout.function((unsigned long)ct);
 
        nf_ct_put(ct);
-       DEBUGP("leaving\n");
 
        return 0;
 }
@@ -744,8 +707,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        u_int8_t u3 = nfmsg->nfgen_family;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                u32 rlen;
 
@@ -779,11 +740,9 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
                return err;
 
        h = nf_conntrack_find_get(&tuple, NULL);
-       if (!h) {
-               DEBUGP("tuple not found in conntrack hash");
+       if (!h)
                return -ENOENT;
-       }
-       DEBUGP("tuple found\n");
+
        ct = nf_ct_tuplehash_to_ctrack(h);
 
        err = -ENOMEM;
@@ -804,7 +763,6 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                goto out;
 
-       DEBUGP("leaving\n");
        return 0;
 
 free:
@@ -876,8 +834,6 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
        char *helpname;
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (!help) {
                /* FIXME: we need to reallocate and rehash */
                return -EBUSY;
@@ -954,8 +910,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[])
 {
        int err;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (cda[CTA_HELP-1]) {
                err = ctnetlink_change_helper(ct, cda);
                if (err < 0)
@@ -985,7 +939,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nfattr *cda[])
                ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
 #endif
 
-       DEBUGP("all done\n");
        return 0;
 }
 
@@ -997,8 +950,6 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
        struct nf_conn *ct;
        int err = -EINVAL;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        ct = nf_conntrack_alloc(otuple, rtuple);
        if (ct == NULL || IS_ERR(ct))
                return -ENOMEM; 
@@ -1028,7 +979,6 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
        add_timer(&ct->timeout);
        nf_conntrack_hash_insert(ct);
 
-       DEBUGP("conntrack with id %u inserted\n", ct->id);
        return 0;
 
 err:   
@@ -1046,8 +996,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
        u_int8_t u3 = nfmsg->nfgen_family;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nfattr_bad_size(cda, CTA_MAX, cta_min))
                return -EINVAL;
 
@@ -1071,7 +1019,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
        if (h == NULL) {
                write_unlock_bh(&nf_conntrack_lock);
-               DEBUGP("no such conntrack, create new\n");
                err = -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_CREATE)
                        err = ctnetlink_create_conntrack(cda, &otuple, &rtuple);
@@ -1087,7 +1034,6 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
 
        /* We manipulate the conntrack inside the global conntrack table lock,
         * so there's no need to increase the refcount */
-       DEBUGP("conntrack found\n");
        err = -EEXIST;
        if (!(nlh->nlmsg_flags & NLM_F_EXCL))
                err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), cda);
@@ -1268,8 +1214,6 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        struct nfgenmsg *nfmsg = NLMSG_DATA(cb->nlh);
        u_int8_t l3proto = nfmsg->nfgen_family;
 
-       DEBUGP("entered %s, last id=%llu\n", __FUNCTION__, *id);
-
        read_lock_bh(&nf_conntrack_lock);
        list_for_each_prev(i, &nf_conntrack_expect_list) {
                exp = (struct nf_conntrack_expect *) i;
@@ -1287,8 +1231,6 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 out:   
        read_unlock_bh(&nf_conntrack_lock);
 
-       DEBUGP("leaving, last id=%llu\n", *id);
-
        return skb->len;
 }
 
@@ -1308,8 +1250,6 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
        u_int8_t u3 = nfmsg->nfgen_family;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
                return -EINVAL;
 
@@ -1460,8 +1400,6 @@ ctnetlink_create_expect(struct nfattr *cda[], u_int8_t u3)
        struct nf_conn_help *help;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);
-
        /* caller guarantees that those three CTA_EXPECT_* exist */
        err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
        if (err < 0)
@@ -1516,8 +1454,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        u_int8_t u3 = nfmsg->nfgen_family;
        int err = 0;
 
-       DEBUGP("entered %s\n", __FUNCTION__);   
-
        if (nfattr_bad_size(cda, CTA_EXPECT_MAX, cta_min_exp))
                return -EINVAL;
 
@@ -1546,8 +1482,6 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
                err = ctnetlink_change_expect(exp, cda);
        write_unlock_bh(&nf_conntrack_lock);
 
-       DEBUGP("leaving\n");
-       
        return err;
 }
 
index db9b896e57c8527ecfc9db2d7c0decbaca77b8e2..39e117502bd7c821afc4c1adef330585bdedf2f2 100644 (file)
@@ -68,7 +68,7 @@ static int __init xt_nfqueue_init(void)
 
 static void __exit xt_nfqueue_fini(void)
 {
-       xt_register_targets(xt_nfqueue_target, ARRAY_SIZE(xt_nfqueue_target));
+       xt_unregister_targets(xt_nfqueue_target, ARRAY_SIZE(xt_nfqueue_target));
 }
 
 module_init(xt_nfqueue_init);
index 92a5726ef237e2fb6f894c205bf26d9b97971075..a8f03057dbdedd8c4887b0585ab48e60d1bd5b03 100644 (file)
@@ -147,7 +147,7 @@ static int __init xt_connmark_init(void)
 
 static void __exit xt_connmark_fini(void)
 {
-       xt_register_matches(xt_connmark_match, ARRAY_SIZE(xt_connmark_match));
+       xt_unregister_matches(xt_connmark_match, ARRAY_SIZE(xt_connmark_match));
 }
 
 module_init(xt_connmark_init);
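
The xt_NFQUEUE and xt_connmark hunks fix the same copy-paste bug: each module's exit handler called the register function a second time instead of unregistering, so unloading the module left stale target and match entries behind. The safe pattern keeps init and exit strictly symmetric; a sketch with a hypothetical xt_example_tgt array:

    static struct xt_target xt_example_tgt[1];          /* hypothetical */

    static int __init xt_example_init(void)
    {
            return xt_register_targets(xt_example_tgt,
                                       ARRAY_SIZE(xt_example_tgt));
    }

    static void __exit xt_example_fini(void)
    {
            /* mirror of init: every register gets exactly one unregister */
            xt_unregister_targets(xt_example_tgt,
                                  ARRAY_SIZE(xt_example_tgt));
    }
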
index 54fb7de3c2b1d42be1b4dc131810cf70e9618614..ff971103fd0ce4e9732e6d621c6df38c6ac45723 100644 (file)
@@ -200,7 +200,7 @@ void netlbl_cache_invalidate(void)
 int netlbl_cache_add(const struct sk_buff *skb,
                     const struct netlbl_lsm_secattr *secattr)
 {
-       if (secattr->cache.data == NULL)
+       if (secattr->cache == NULL)
                return -ENOMSG;
 
        if (CIPSO_V4_OPTEXIST(skb))
index bb3ddd4784b1cebfd4668dc15df844b484b36267..9b9c555c713f0a4666c00657da1b37037928b722 100644 (file)
@@ -786,11 +786,10 @@ static long htb_do_events(struct htb_sched *q, int level)
        for (i = 0; i < 500; i++) {
                struct htb_class *cl;
                long diff;
-               struct rb_node *p = q->wait_pq[level].rb_node;
+               struct rb_node *p = rb_first(&q->wait_pq[level]);
+
                if (!p)
                        return 0;
-               while (p->rb_left)
-                       p = p->rb_left;
 
                cl = rb_entry(p, struct htb_class, pq_node);
                if (time_after(cl->pq_key, q->jiffies)) {
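
rb_first() returns the leftmost, i.e. smallest-keyed, node of an rbtree, which is exactly what the removed hand-rolled descent through ->rb_left computed. Approximately what the helper does internally:

    struct rb_node *rb_first(struct rb_root *root)
    {
            struct rb_node *n = root->rb_node;

            if (!n)
                    return NULL;
            while (n->rb_left)                  /* smallest key is leftmost */
                    n = n->rb_left;
            return n;
    }
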
index a356d8d310a95f0cc04033f091fa229ea7a510ea..7f49e769080ea631e9827e9e483bcb1577a9721b 100644 (file)
@@ -344,7 +344,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                           assoc, sk, sctp_sk(sk)->type, sk->sk_state,
                           assoc->state, hash, assoc->assoc_id,
                           assoc->sndbuf_used,
-                          (sk->sk_rcvbuf - assoc->rwnd),
+                          atomic_read(&assoc->rmem_alloc),
                           sock_i_uid(sk), sock_i_ino(sk),
                           epb->bind_addr.port,
                           assoc->peer.port);
index 3fe906d6506982f9e1c7a0bdcf6e67b6d2772d58..9deec43911871c1107a17e764fd2d8a7ffffb361 100644 (file)
@@ -5362,6 +5362,20 @@ static void sctp_wfree(struct sk_buff *skb)
        sctp_association_put(asoc);
 }
 
+/* Do accounting for the receive space on the socket.
+ * Accounting for the association is done in ulpevent.c
+ * We set this as a destructor for the cloned data skbs so that
+ * accounting is done at the correct time.
+ */
+void sctp_sock_rfree(struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+       atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
+}
+
+
 /* Helper function to wait for space in the sndbuf.  */
 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                                size_t msg_len)
@@ -5634,10 +5648,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
        sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
                event = sctp_skb2event(skb);
                if (event->asoc == assoc) {
-                       sock_rfree(skb);
+                       sctp_sock_rfree(skb);
                        __skb_unlink(skb, &oldsk->sk_receive_queue);
                        __skb_queue_tail(&newsk->sk_receive_queue, skb);
-                       skb_set_owner_r(skb, newsk);
+                       sctp_skb_set_owner_r(skb, newsk);
                }
        }
 
@@ -5665,10 +5679,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
                sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
                        event = sctp_skb2event(skb);
                        if (event->asoc == assoc) {
-                               sock_rfree(skb);
+                               sctp_sock_rfree(skb);
                                __skb_unlink(skb, &oldsp->pd_lobby);
                                __skb_queue_tail(queue, skb);
-                               skb_set_owner_r(skb, newsk);
+                               sctp_skb_set_owner_r(skb, newsk);
                        }
                }
 
index ee236784a6bb91ea8bbf3787b776aabf65971ae9..a015283a90870bcb5f9c5c85d08d5742c865813a 100644 (file)
@@ -55,10 +55,13 @@ static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event);
 
 
 /* Initialize an ULP event from an given skb.  */
-SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
+SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event,
+                                   int msg_flags,
+                                   unsigned int len)
 {
        memset(event, 0, sizeof(struct sctp_ulpevent));
        event->msg_flags = msg_flags;
+       event->rmem_len = len;
 }
 
 /* Create a new sctp_ulpevent.  */
@@ -73,7 +76,7 @@ SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
                goto fail;
 
        event = sctp_skb2event(skb);
-       sctp_ulpevent_init(event, msg_flags);
+       sctp_ulpevent_init(event, msg_flags, skb->truesize);
 
        return event;
 
@@ -101,17 +104,16 @@ static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event,
        sctp_association_hold((struct sctp_association *)asoc);
        skb = sctp_event2skb(event);
        event->asoc = (struct sctp_association *)asoc;
-       atomic_add(skb->truesize, &event->asoc->rmem_alloc);
-       skb_set_owner_r(skb, asoc->base.sk);
+       atomic_add(event->rmem_len, &event->asoc->rmem_alloc);
+       sctp_skb_set_owner_r(skb, asoc->base.sk);
 }
 
 /* A simple destructor to give up the reference to the association. */
 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
 {
        struct sctp_association *asoc = event->asoc;
-       struct sk_buff *skb = sctp_event2skb(event);
 
-       atomic_sub(skb->truesize, &asoc->rmem_alloc);
+       atomic_sub(event->rmem_len, &asoc->rmem_alloc);
        sctp_association_put(asoc);
 }
 
@@ -372,7 +374,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
 
        /* Embed the event fields inside the cloned skb.  */
        event = sctp_skb2event(skb);
-       sctp_ulpevent_init(event, MSG_NOTIFICATION);
+       sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
        sre = (struct sctp_remote_error *)
                skb_push(skb, sizeof(struct sctp_remote_error));
@@ -464,7 +466,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
 
        /* Embed the event fields inside the cloned skb.  */
        event = sctp_skb2event(skb);
-       sctp_ulpevent_init(event, MSG_NOTIFICATION);
+       sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
        ssf = (struct sctp_send_failed *)
                skb_push(skb, sizeof(struct sctp_send_failed));
@@ -682,8 +684,11 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
        /* Embed the event fields inside the cloned skb.  */
        event = sctp_skb2event(skb);
 
-       /* Initialize event with flags 0.  */
-       sctp_ulpevent_init(event, 0);
+       /* Initialize event with flags 0 and the correct length.
+        * Since this is a clone of the original skb, only account for
+        * the data of this chunk as other chunks will be accounted separately.
+        */
+       sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff));
 
        sctp_ulpevent_receive_data(event, asoc);
 
index 575e556aeb3eb58c30dd80f86c6158defb3c664f..e1d144275f97aab998d4edc5155e608a316945e9 100644 (file)
@@ -309,7 +309,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
                        if (!new)
                                return NULL;    /* try again later */
 
-                       new->sk = f_frag->sk;
+                       sctp_skb_set_owner_r(new, f_frag->sk);
 
                        skb_shinfo(new)->frag_list = pos;
                } else
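
Taken together, the sctp hunks move receive-space accounting from skb->truesize to an rmem_len recorded when the event is created, so a cloned data skb is charged only for its own chunk. sctp_sock_rfree() (shown earlier) undoes the charge on free; its counterpart sctp_skb_set_owner_r() presumably applies it, along these lines:

    static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
    {
            struct sctp_ulpevent *event = sctp_skb2event(skb);

            skb->sk = sk;
            skb->destructor = sctp_sock_rfree;  /* runs from kfree_skb() */
            atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
    }
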
index 2807fa0eab40bd7644386410a125dc52d1369893..eb44ec929ca115a5944b7dd8f380a79d3b057249 100644 (file)
@@ -828,6 +828,11 @@ svc_process(struct svc_rqst *rqstp)
                *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
 
                /* Encode reply */
+               if (*statp == rpc_drop_reply) {
+                       if (procp->pc_release)
+                               procp->pc_release(rqstp, NULL, rqstp->rq_resp);
+                       goto dropit;
+               }
                if (*statp == rpc_success && (xdr = procp->pc_encode)
                 && !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {
                        dprintk("svc: failed to encode reply\n");
index 2a7861661f14e5f268fd376117bdd70367165893..7736b23c3f0386a7c0bab1840425fdb69e7a7cd8 100644 (file)
@@ -883,30 +883,32 @@ out:
 }
 EXPORT_SYMBOL(xfrm_policy_walk);
 
-/* Find policy to apply to this flow. */
-
+/*
+ * Find policy to apply to this flow.
+ *
+ * Returns 0 if policy found, else an -errno.
+ */
 static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
                             u8 type, u16 family, int dir)
 {
        struct xfrm_selector *sel = &pol->selector;
-       int match;
+       int match, ret = -ESRCH;
 
        if (pol->family != family ||
            pol->type != type)
-               return 0;
+               return ret;
 
        match = xfrm_selector_match(sel, fl, family);
-       if (match) {
-               if (!security_xfrm_policy_lookup(pol, fl->secid, dir))
-                       return 1;
-       }
+       if (match)
+               ret = security_xfrm_policy_lookup(pol, fl->secid, dir);
 
-       return 0;
+       return ret;
 }
 
 static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
                                                     u16 family, u8 dir)
 {
+       int err;
        struct xfrm_policy *pol, *ret;
        xfrm_address_t *daddr, *saddr;
        struct hlist_node *entry;
@@ -922,7 +924,15 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
        chain = policy_hash_direct(daddr, saddr, family, dir);
        ret = NULL;
        hlist_for_each_entry(pol, entry, chain, bydst) {
-               if (xfrm_policy_match(pol, fl, type, family, dir)) {
+               err = xfrm_policy_match(pol, fl, type, family, dir);
+               if (err) {
+                       if (err == -ESRCH)
+                               continue;
+                       else {
+                               ret = ERR_PTR(err);
+                               goto fail;
+                       }
+               } else {
                        ret = pol;
                        priority = ret->priority;
                        break;
@@ -930,36 +940,53 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(u8 type, struct flowi *fl,
        }
        chain = &xfrm_policy_inexact[dir];
        hlist_for_each_entry(pol, entry, chain, bydst) {
-               if (xfrm_policy_match(pol, fl, type, family, dir) &&
-                   pol->priority < priority) {
+               err = xfrm_policy_match(pol, fl, type, family, dir);
+               if (err) {
+                       if (err == -ESRCH)
+                               continue;
+                       else {
+                               ret = ERR_PTR(err);
+                               goto fail;
+                       }
+               } else if (pol->priority < priority) {
                        ret = pol;
                        break;
                }
        }
        if (ret)
                xfrm_pol_hold(ret);
+fail:
        read_unlock_bh(&xfrm_policy_lock);
 
        return ret;
 }
 
-static void xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
+static int xfrm_policy_lookup(struct flowi *fl, u16 family, u8 dir,
                               void **objp, atomic_t **obj_refp)
 {
        struct xfrm_policy *pol;
+       int err = 0;
 
 #ifdef CONFIG_XFRM_SUB_POLICY
        pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_SUB, fl, family, dir);
-       if (pol)
+       if (IS_ERR(pol)) {
+               err = PTR_ERR(pol);
+               pol = NULL;
+       }
+       if (pol || err)
                goto end;
 #endif
        pol = xfrm_policy_lookup_bytype(XFRM_POLICY_TYPE_MAIN, fl, family, dir);
-
+       if (IS_ERR(pol)) {
+               err = PTR_ERR(pol);
+               pol = NULL;
+       }
 #ifdef CONFIG_XFRM_SUB_POLICY
 end:
 #endif
        if ((*objp = (void *) pol) != NULL)
                *obj_refp = &pol->refcnt;
+       return err;
 }
 
 static inline int policy_to_flow_dir(int dir)
@@ -989,12 +1016,16 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
                                                sk->sk_family);
                int err = 0;
 
-               if (match)
-                 err = security_xfrm_policy_lookup(pol, fl->secid, policy_to_flow_dir(dir));
-
-               if (match && !err)
-                       xfrm_pol_hold(pol);
-               else
+               if (match) {
+                       err = security_xfrm_policy_lookup(pol, fl->secid,
+                                       policy_to_flow_dir(dir));
+                       if (!err)
+                               xfrm_pol_hold(pol);
+                       else if (err == -ESRCH)
+                               pol = NULL;
+                       else
+                               pol = ERR_PTR(err);
+               } else
                        pol = NULL;
        }
        read_unlock_bh(&xfrm_policy_lock);
@@ -1286,8 +1317,11 @@ restart:
        pol_dead = 0;
        xfrm_nr = 0;
 
-       if (sk && sk->sk_policy[1])
+       if (sk && sk->sk_policy[1]) {
                policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+               if (IS_ERR(policy))
+                       return PTR_ERR(policy);
+       }
 
        if (!policy) {
                /* To accelerate a bit...  */
@@ -1297,6 +1331,8 @@ restart:
 
                policy = flow_cache_lookup(fl, dst_orig->ops->family,
                                           dir, xfrm_policy_lookup);
+               if (IS_ERR(policy))
+                       return PTR_ERR(policy);
        }
 
        if (!policy)
@@ -1343,6 +1379,10 @@ restart:
                                                            fl, family,
                                                            XFRM_POLICY_OUT);
                        if (pols[1]) {
+                               if (IS_ERR(pols[1])) {
+                                       err = PTR_ERR(pols[1]);
+                                       goto error;
+                               }
                                if (pols[1]->action == XFRM_POLICY_BLOCK) {
                                        err = -EPERM;
                                        goto error;
@@ -1574,13 +1614,19 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
        }
 
        pol = NULL;
-       if (sk && sk->sk_policy[dir])
+       if (sk && sk->sk_policy[dir]) {
                pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+               if (IS_ERR(pol))
+                       return 0;
+       }
 
        if (!pol)
                pol = flow_cache_lookup(&fl, family, fl_dir,
                                        xfrm_policy_lookup);
 
+       if (IS_ERR(pol))
+               return 0;
+
        if (!pol) {
                if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
                        xfrm_secpath_reject(xerr_idx, skb, &fl);
@@ -1599,6 +1645,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                                                    &fl, family,
                                                    XFRM_POLICY_IN);
                if (pols[1]) {
+                       if (IS_ERR(pols[1]))
+                               return 0;
                        pols[1]->curlft.use_time = (unsigned long)xtime.tv_sec;
                        npols ++;
                }
@@ -1706,7 +1754,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-       return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
+       return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -1828,7 +1876,8 @@ EXPORT_SYMBOL(xfrm_init_pmtu);
  * still valid.
  */
 
-int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int strict)
+int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
+               struct flowi *fl, int family, int strict)
 {
        struct dst_entry *dst = &first->u.dst;
        struct xfrm_dst *last;
@@ -1845,7 +1894,7 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family, int str
 
                if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
                        return 0;
-               if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm))
+               if (fl && !security_xfrm_flow_state_match(fl, dst->xfrm, pol))
                        return 0;
                if (dst->xfrm->km.state != XFRM_STATE_VALID)
                        return 0;
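
The xfrm lookup paths now separate "no matching policy" from a hard failure such as an LSM denial: -ESRCH keeps the search going, while any other error is encoded into the returned pointer with ERR_PTR() and unpacked by callers through IS_ERR()/PTR_ERR(). The convention in miniature, with a hypothetical lookup:

    #include <linux/err.h>
    #include <linux/errno.h>

    struct xfrm_policy;

    /* NULL      -> no match, the caller may keep searching or fall back
     * ERR_PTR() -> the lookup itself failed, propagate the errno */
    static struct xfrm_policy *policy_lookup_sketch(int denied)
    {
            return denied ? ERR_PTR(-EACCES) : NULL;
    }

    static int caller_sketch(void)
    {
            struct xfrm_policy *pol = policy_lookup_sketch(0);

            if (IS_ERR(pol))
                    return PTR_ERR(pol);
            return pol ? 0 : -ENOENT;
    }
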
index 39b8bf3a9ded2c7d92b61f8477e138c772c45d3f..84bbf8474f3eb659fea321cac5629ff1e96a0d48 100644 (file)
@@ -614,6 +614,14 @@ out:
        return x;
 }
 
+static void xfrm_hash_grow_check(int have_hash_collision)
+{
+       if (have_hash_collision &&
+           (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
+           xfrm_state_num > xfrm_state_hmask)
+               schedule_work(&xfrm_hash_work);
+}
+
 static void __xfrm_state_insert(struct xfrm_state *x)
 {
        unsigned int h;
@@ -642,10 +650,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
 
        xfrm_state_num++;
 
-       if (x->bydst.next != NULL &&
-           (xfrm_state_hmask + 1) < xfrm_state_hashmax &&
-           xfrm_state_num > xfrm_state_hmask)
-               schedule_work(&xfrm_hash_work);
+       xfrm_hash_grow_check(x->bydst.next != NULL);
 }
 
 /* xfrm_state_lock is held */
@@ -753,6 +758,10 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
                h = xfrm_src_hash(daddr, saddr, family);
                hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
                wake_up(&km_waitq);
+
+               xfrm_state_num++;
+
+               xfrm_hash_grow_check(x->bydst.next != NULL);
        }
 
        return x;
index d54b3a70d5dfb6cda89a8661a0ee380c078d791d..2b2e59d8ffbc8ca197c8b84b98248c92511ce365 100644 (file)
@@ -1992,15 +1992,6 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
        xp->type = XFRM_POLICY_TYPE_MAIN;
        copy_templates(xp, ut, nr);
 
-       if (!xp->security) {
-               int err = security_xfrm_sock_policy_alloc(xp, sk);
-               if (err) {
-                       kfree(xp);
-                       *dir = err;
-                       return NULL;
-               }
-       }
-
        *dir = p->dir;
 
        return xp;
index 6a026f69b563e67cf098f110901b490a887bd8f0..4241e0dfeeaf1edeae16c4fd61a505b1985bb28f 100644 (file)
@@ -168,7 +168,7 @@ $(objhdr-y) $(header-y) $(unifdef-y): $(KBUILDFILES)
        $(call cmd,gen)
 
 else
-$(objhdr-y) :          $(INSTALL_HDR_PATH)/$(_dst)/%.h: $(srctree)/$(obj)/%.h $(KBUILDFILES)
+$(objhdr-y) :          $(INSTALL_HDR_PATH)/$(_dst)/%.h: $(objtree)/$(obj)/%.h $(KBUILDFILES)
        $(call cmd,o_hdr_install)
 
 $(header-y) :          $(INSTALL_HDR_PATH)/$(_dst)/%.h: $(srctree)/$(obj)/%.h $(KBUILDFILES)
index 6c5469b1473bee648984676165942cebed2b5971..65e0a79c36cf8525480181945b53ab25eef8bcd2 100644 (file)
@@ -44,7 +44,7 @@ include scripts/Kbuild.include
 include scripts/Makefile.lib
 
 kernelsymfile := $(objtree)/Module.symvers
-modulesymfile := $(KBUILD_EXTMOD)/Module.symvers
+modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
 
 # Step 1), find all modules listed in $(MODVERDIR)/
 __modules := $(sort $(shell grep -h '\.ko' /dev/null $(wildcard $(MODVERDIR)/*.mod)))
index 8dea47f9d3e49a36803ed4c90223d9858a83d3de..fd695e1070f76de6d64f2b6d95b2b2a5fbf84a9f 100644 (file)
@@ -24,6 +24,7 @@
 #include <ctype.h>
 #include <stdlib.h>
 #include <string.h>
+#include <stdbool.h>
 
 #ifdef __sun__
 #define CURS_MACROS
index 00d1ad19b2cc8babe05a17190ca48c5029eae9a1..187f5de4612c2247bee0e003ac397db6ff70cbc1 100755 (executable)
@@ -1262,7 +1262,9 @@ sub output_intro_text(%) {
 }
 
 ##
-# generic output function for typedefs
+# generic output function for all types (function, struct/union, typedef, enum);
+# calls the generated, variable output_ function name based on
+# functype and output_mode
 sub output_declaration {
     no strict 'refs';
     my $name = shift;
@@ -1278,8 +1280,7 @@ sub output_declaration {
 }
 
 ##
-# generic output function - calls the right one based
-# on current output mode.
+# generic output function - calls the right one based on current output mode.
 sub output_intro {
     no strict 'refs';
     my $func = "output_intro_".$output_mode;
@@ -1518,6 +1519,9 @@ sub dump_function($$) {
     $prototype =~ s/^asmlinkage +//;
     $prototype =~ s/^inline +//;
     $prototype =~ s/^__inline__ +//;
+    $prototype =~ s/^__inline +//;
+    $prototype =~ s/^__always_inline +//;
+    $prototype =~ s/^noinline +//;
     $prototype =~ s/__devinit +//;
     $prototype =~ s/^#define +//; #ak added
     $prototype =~ s/__attribute__ \(\([a-z,]*\)\)//;
@@ -1778,8 +1782,9 @@ sub process_file($) {
                $in_doc_sect = 1;
                $contents = $newcontents;
                if ($contents ne "") {
-                   if (substr($contents, 0, 1) eq " ") {
-                       $contents = substr($contents, 1);
+                   while ((substr($contents, 0, 1) eq " ") ||
+                       substr($contents, 0, 1) eq "\t") {
+                           $contents = substr($contents, 1);
                    }
                    $contents .= "\n";
                }
index aeee70565509dd8e6c843591e142f452fb721e96..43874c1e6e23999def36ff43b6918d806dcaf54b 100644 (file)
@@ -881,7 +881,8 @@ static int dummy_xfrm_state_pol_flow_match(struct xfrm_state *x,
        return 1;
 }
 
-static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm)
+static int dummy_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
+                               struct xfrm_policy *xp)
 {
        return 1;
 }
index 81eb59890162e506126d734de92fd002c02994b4..526b28019acaa55ac0c7db6da9b7933fef8936c8 100644 (file)
@@ -19,7 +19,8 @@ int selinux_xfrm_state_delete(struct xfrm_state *x);
 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir);
 int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
                        struct xfrm_policy *xp, struct flowi *fl);
-int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm);
+int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
+                       struct xfrm_policy *xp);
 
 
 /*
index cfed1d30fa6ad7af8e8aae11810bf58ba61dc27a..d539346ab3a2b5143f1d2232239ee8bdc217e4c8 100644 (file)
@@ -93,11 +93,15 @@ int ebitmap_export(const struct ebitmap *src,
        size_t bitmap_byte;
        unsigned char bitmask;
 
+       if (src->highbit == 0) {
+               *dst = NULL;
+               *dst_len = 0;
+               return 0;
+       }
+
        bitmap_len = src->highbit / 8;
        if (src->highbit % 7)
                bitmap_len += 1;
-       if (bitmap_len == 0)
-               return -EINVAL;
 
        bitmap = kzalloc((bitmap_len & ~(sizeof(MAPTYPE) - 1)) +
                         sizeof(MAPTYPE),
index c713af23250a331d81df2ec82d439cc38768571c..2cca8e251624037d3967d8fde0d21d8ed8ddf2a6 100644 (file)
@@ -640,8 +640,13 @@ int mls_export_cat(const struct context *context,
 {
        int rc = -EPERM;
 
-       if (!selinux_mls_enabled)
+       if (!selinux_mls_enabled) {
+               *low = NULL;
+               *low_len = 0;
+               *high = NULL;
+               *high_len = 0;
                return 0;
+       }
 
        if (low != NULL) {
                rc = ebitmap_export(&context->range.level[0].cat,
@@ -661,10 +666,16 @@ int mls_export_cat(const struct context *context,
        return 0;
 
 export_cat_failure:
-       if (low != NULL)
+       if (low != NULL) {
                kfree(*low);
-       if (high != NULL)
+               *low = NULL;
+               *low_len = 0;
+       }
+       if (high != NULL) {
                kfree(*high);
+               *high = NULL;
+               *high_len = 0;
+       }
        return rc;
 }
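
Both SELinux export fixes leave the out-parameters well defined on every path: NULL/0 on the early "nothing to export" returns, and again after freeing on failure, so callers can kfree() the outputs unconditionally and never see a dangling pointer. The pattern in isolation, with a hypothetical exporter:

    #include <linux/slab.h>

    static int export_sketch(unsigned char **dst, size_t *dst_len, size_t need)
    {
            *dst = NULL;                /* outputs are sane from the start */
            *dst_len = 0;

            if (need == 0)
                    return 0;           /* legitimately nothing to export */

            *dst = kzalloc(need, GFP_ATOMIC);
            if (*dst == NULL)
                    return -ENOMEM;
            *dst_len = need;
            return 0;
    }
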
 
index b18895302555618f02ee0d03590bc2e04ee3d1c6..ba48961f9d0593f99a30b0c3983a09c63425f766 100644 (file)
@@ -618,6 +618,7 @@ void policydb_destroy(struct policydb *p)
                        c = c->next;
                        ocontext_destroy(ctmp,i);
                }
+               p->ocontexts[i] = NULL;
        }
 
        g = p->genfs;
@@ -633,6 +634,7 @@ void policydb_destroy(struct policydb *p)
                g = g->next;
                kfree(gtmp);
        }
+       p->genfs = NULL;
 
        cond_policydb_destroy(p);
 
index 0c219a1b32435e0e83eaa980b15b8031b11ed539..b1f6fb36c6997cc40ea8cf350adf12a9218bf8bc 100644 (file)
@@ -2172,7 +2172,12 @@ struct netlbl_cache {
  */
 static void selinux_netlbl_cache_free(const void *data)
 {
-       struct netlbl_cache *cache = NETLBL_CACHE(data);
+       struct netlbl_cache *cache;
+
+       if (data == NULL)
+               return;
+
+       cache = NETLBL_CACHE(data);
        switch (cache->type) {
        case NETLBL_CACHE_T_MLS:
                ebitmap_destroy(&cache->data.mls_label.level[0].cat);
@@ -2197,17 +2202,20 @@ static void selinux_netlbl_cache_add(struct sk_buff *skb, struct context *ctx)
        struct netlbl_lsm_secattr secattr;
 
        netlbl_secattr_init(&secattr);
+       secattr.cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
+       if (secattr.cache == NULL)
+               goto netlbl_cache_add_return;
 
        cache = kzalloc(sizeof(*cache), GFP_ATOMIC);
        if (cache == NULL)
-               goto netlbl_cache_add_failure;
-       secattr.cache.free = selinux_netlbl_cache_free;
-       secattr.cache.data = (void *)cache;
+               goto netlbl_cache_add_return;
+       secattr.cache->free = selinux_netlbl_cache_free;
+       secattr.cache->data = (void *)cache;
 
        cache->type = NETLBL_CACHE_T_MLS;
        if (ebitmap_cpy(&cache->data.mls_label.level[0].cat,
                        &ctx->range.level[0].cat) != 0)
-               goto netlbl_cache_add_failure;
+               goto netlbl_cache_add_return;
        cache->data.mls_label.level[1].cat.highbit =
                cache->data.mls_label.level[0].cat.highbit;
        cache->data.mls_label.level[1].cat.node =
@@ -2215,13 +2223,10 @@ static void selinux_netlbl_cache_add(struct sk_buff *skb, struct context *ctx)
        cache->data.mls_label.level[0].sens = ctx->range.level[0].sens;
        cache->data.mls_label.level[1].sens = ctx->range.level[0].sens;
 
-       if (netlbl_cache_add(skb, &secattr) != 0)
-               goto netlbl_cache_add_failure;
-
-       return;
+       netlbl_cache_add(skb, &secattr);
 
-netlbl_cache_add_failure:
-       netlbl_secattr_destroy(&secattr, 1);
+netlbl_cache_add_return:
+       netlbl_secattr_destroy(&secattr);
 }
 
 /**
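[Note: the per-packet label cache moves from a struct embedded in the secattr to a separately allocated, refcounted object, which is why the failure label collapses into a single exit: netlbl_secattr_destroy() drops a cache reference, and the cache's free callback runs only when the last reference goes away. A sketch of the assumed cache object and its release helper; the field names mirror the diff, but the bodies are assumptions, not this commit's code:]

struct netlbl_lsm_cache {
        atomic_t refcount;
        void (*free)(const void *data);
        void *data;
};

/* Assumed release helper: drop one reference and destroy the LSM
 * payload via the registered callback on the final put. */
static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
{
        if (!atomic_dec_and_test(&cache->refcount))
                return;
        if (cache->free)
                cache->free(cache->data);
        kfree(cache);
}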
@@ -2263,8 +2268,8 @@ static int selinux_netlbl_secattr_to_sid(struct sk_buff *skb,
 
        POLICY_RDLOCK;
 
-       if (secattr->cache.data) {
-               cache = NETLBL_CACHE(secattr->cache.data);
+       if (secattr->cache) {
+               cache = NETLBL_CACHE(secattr->cache->data);
                switch (cache->type) {
                case NETLBL_CACHE_T_SID:
                        *sid = cache->data.sid;
@@ -2331,7 +2336,7 @@ static int selinux_netlbl_secattr_to_sid(struct sk_buff *skb,
                        selinux_netlbl_cache_add(skb, &ctx_new);
                ebitmap_destroy(&ctx_new.range.level[0].cat);
        } else {
-               *sid = SECINITSID_UNLABELED;
+               *sid = SECSID_NULL;
                rc = 0;
        }
 
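[Note: the unlabeled case now reports SECSID_NULL instead of SECINITSID_UNLABELED, letting callers tell "no NetLabel label present" apart from a packet genuinely labeled as unlabeled; and, as the next hunk shows, netlbl_secattr_destroy() loses its old "also free the cache" flag, since the refcounted cache handles that. The resulting caller convention, sketched with hypothetical locals and return codes:]

u32 sid;

if (selinux_netlbl_skbuff_getsid(skb, SECINITSID_UNLABELED, &sid) != 0)
        return -EACCES;         /* lookup itself failed */
if (sid == SECSID_NULL)
        return 0;               /* unlabeled traffic: nothing to enforce */
/* ... enforce socket permissions against sid ... */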
@@ -2369,7 +2374,7 @@ static int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
                                                   &secattr,
                                                   base_sid,
                                                   sid);
-       netlbl_secattr_destroy(&secattr, 0);
+       netlbl_secattr_destroy(&secattr);
 
        return rc;
 }
@@ -2394,31 +2399,33 @@ static int selinux_netlbl_socket_setsid(struct socket *sock, u32 sid)
        if (!ss_initialized)
                return 0;
 
+       netlbl_secattr_init(&secattr);
+
        POLICY_RDLOCK;
 
        ctx = sidtab_search(&sidtab, sid);
        if (ctx == NULL)
                goto netlbl_socket_setsid_return;
 
-       netlbl_secattr_init(&secattr);
        secattr.domain = kstrdup(policydb.p_type_val_to_name[ctx->type - 1],
                                 GFP_ATOMIC);
        mls_export_lvl(ctx, &secattr.mls_lvl, NULL);
        secattr.mls_lvl_vld = 1;
-       mls_export_cat(ctx,
-                      &secattr.mls_cat,
-                      &secattr.mls_cat_len,
-                      NULL,
-                      NULL);
+       rc = mls_export_cat(ctx,
+                           &secattr.mls_cat,
+                           &secattr.mls_cat_len,
+                           NULL,
+                           NULL);
+       if (rc != 0)
+               goto netlbl_socket_setsid_return;
 
        rc = netlbl_socket_setattr(sock, &secattr);
        if (rc == 0)
                sksec->nlbl_state = NLBL_LABELED;
 
-       netlbl_secattr_destroy(&secattr, 0);
-
 netlbl_socket_setsid_return:
        POLICY_RDUNLOCK;
+       netlbl_secattr_destroy(&secattr);
        return rc;
 }
 
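[Note: the setsid path now initializes the secattr before taking the policy read lock, destroys it after dropping the lock, and checks the mls_export_cat return value, so every exit funnels through one unlock-and-cleanup point. Condensed, the control flow the hunk establishes (illustrative, not the full function; the label name is shortened):]

netlbl_secattr_init(&secattr);
POLICY_RDLOCK;

ctx = sidtab_search(&sidtab, sid);
if (ctx == NULL)
        goto out;
rc = mls_export_cat(ctx, &secattr.mls_cat, &secattr.mls_cat_len,
                    NULL, NULL);
if (rc != 0)
        goto out;
rc = netlbl_socket_setattr(sock, &secattr);
out:
        POLICY_RDUNLOCK;
        netlbl_secattr_destroy(&secattr);       /* safe on all paths */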
@@ -2514,10 +2521,10 @@ void selinux_netlbl_sock_graft(struct sock *sk, struct socket *sock)
        if (netlbl_sock_getattr(sk, &secattr) == 0 &&
            selinux_netlbl_secattr_to_sid(NULL,
                                          &secattr,
-                                         sksec->sid,
+                                         SECINITSID_UNLABELED,
                                          &nlbl_peer_sid) == 0)
                sksec->peer_sid = nlbl_peer_sid;
-       netlbl_secattr_destroy(&secattr, 0);
+       netlbl_secattr_destroy(&secattr);
 
        sksec->nlbl_state = NLBL_REQUIRE;
 
@@ -2547,9 +2554,6 @@ u32 selinux_netlbl_inet_conn_request(struct sk_buff *skb, u32 sock_sid)
        if (rc != 0)
                return SECSID_NULL;
 
-       if (peer_sid == SECINITSID_UNLABELED)
-               return SECSID_NULL;
-
        return peer_sid;
 }
 
@@ -2611,11 +2615,13 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
        u32 netlbl_sid;
        u32 recv_perm;
 
-       rc = selinux_netlbl_skbuff_getsid(skb, SECINITSID_NETMSG, &netlbl_sid);
+       rc = selinux_netlbl_skbuff_getsid(skb,
+                                         SECINITSID_UNLABELED,
+                                         &netlbl_sid);
        if (rc != 0)
                return rc;
 
-       if (netlbl_sid == SECINITSID_UNLABELED)
+       if (netlbl_sid == SECSID_NULL)
                return 0;
 
        switch (sksec->sclass) {
@@ -2653,10 +2659,6 @@ int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
 u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock)
 {
        struct sk_security_struct *sksec = sock->sk->sk_security;
-
-       if (sksec->peer_sid == SECINITSID_UNLABELED)
-               return SECSID_NULL;
-
        return sksec->peer_sid;
 }
 
@@ -2672,16 +2674,10 @@ u32 selinux_netlbl_socket_getpeersec_stream(struct socket *sock)
 u32 selinux_netlbl_socket_getpeersec_dgram(struct sk_buff *skb)
 {
        int peer_sid;
-       struct sock *sk = skb->sk;
-       struct inode_security_struct *isec;
 
-       if (sk == NULL || sk->sk_socket == NULL)
-               return SECSID_NULL;
-
-       isec = SOCK_INODE(sk->sk_socket)->i_security;
-       if (selinux_netlbl_skbuff_getsid(skb, isec->sid, &peer_sid) != 0)
-               return SECSID_NULL;
-       if (peer_sid == SECINITSID_UNLABELED)
+       if (selinux_netlbl_skbuff_getsid(skb,
+                                        SECINITSID_UNLABELED,
+                                        &peer_sid) != 0)
                return SECSID_NULL;
 
        return peer_sid;
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 3e742b850af6e9632ecfd5e4d8b08b02fa0f00cd..675b995a67c3ec1836fd6d7034e1dbab8cc6a0a3 100644
@@ -77,8 +77,8 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
  */
 int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir)
 {
-       int rc = 0;
-       u32 sel_sid = SECINITSID_UNLABELED;
+       int rc;
+       u32 sel_sid;
        struct xfrm_sec_ctx *ctx;
 
        /* Context sid is either set to label or ANY_ASSOC */
@@ -88,11 +88,21 @@ int selinux_xfrm_policy_lookup(struct xfrm_policy *xp, u32 fl_secid, u8 dir)
 
                sel_sid = ctx->ctx_sid;
        }
+       else
+               /*
+                * All flows should be treated as polmatch'ing an
+                * otherwise applicable "non-labeled" policy. This
+                * would prevent inadvertent "leaks".
+                */
+               return 0;
 
        rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
                          ASSOCIATION__POLMATCH,
                          NULL);
 
+       if (rc == -EACCES)
+               rc = -ESRCH;
+
        return rc;
 }
 
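[Note: policy lookup now treats an unlabeled policy as matching every flow (returning 0 before the AVC call), and maps an AVC denial for a labeled policy from -EACCES to -ESRCH, so the xfrm policy search reads it as "this policy does not apply" and keeps walking rather than failing the lookup outright. The return-code mapping in isolation; avc_has_perm() is the real AVC entry point, the wrapper name is hypothetical:]

/* Hypothetical wrapper showing the new error mapping: a POLMATCH
 * denial demotes to -ESRCH ("no such policy") instead of a hard
 * permission failure. */
static int polmatch_check(u32 fl_secid, u32 sel_sid)
{
        int rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
                              ASSOCIATION__POLMATCH, NULL);

        return rc == -EACCES ? -ESRCH : rc;
}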
@@ -108,15 +118,20 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
        u32 pol_sid;
        int err;
 
-       if (x->security)
-               state_sid = x->security->ctx_sid;
-       else
-               state_sid = SECINITSID_UNLABELED;
-
-       if (xp->security)
+       if (xp->security) {
+               if (!x->security)
+                       /* unlabeled SA and labeled policy can't match */
+                       return 0;
+               else
+                       state_sid = x->security->ctx_sid;
                pol_sid = xp->security->ctx_sid;
-       else
-               pol_sid = SECINITSID_UNLABELED;
+       } else
+               if (x->security)
+                       /* unlabeled policy and labeled SA can't match */
+                       return 0;
+               else
+                       /* unlabeled policy and unlabeled SA match all flows */
+                       return 1;
 
        err = avc_has_perm(state_sid, pol_sid, SECCLASS_ASSOCIATION,
                          ASSOCIATION__POLMATCH,
@@ -125,7 +140,11 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
        if (err)
                return 0;
 
-       return selinux_xfrm_flow_state_match(fl, x);
+       err = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION,
+                         ASSOCIATION__SENDTO,
+                         NULL)? 0:1;
+
+       return err;
 }
 
 /*
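[Note: the state/policy match above now spells out the four labeling combinations: a label on exactly one side never matches, both sides unlabeled always match, and both sides labeled require POLMATCH between state and policy plus SENDTO from the flow to the state, inlined instead of delegating to selinux_xfrm_flow_state_match. A condensed sketch of those rules; the two avc_has_perm() checks are the ones in the hunk, the helper name is hypothetical:]

/* Returns 1 if the state may be used for this flow under this
 * policy, 0 otherwise. */
static int state_pol_match(struct xfrm_state *x, struct xfrm_policy *xp,
                           struct flowi *fl)
{
        if (!xp->security || !x->security)
                /* match only when *both* sides are unlabeled */
                return !xp->security && !x->security;

        if (avc_has_perm(x->security->ctx_sid, xp->security->ctx_sid,
                         SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL))
                return 0;

        return avc_has_perm(fl->secid, x->security->ctx_sid,
                            SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
                            NULL) ? 0 : 1;
}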
@@ -133,12 +152,22 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
  * can use a given security association.
  */
 
-int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm)
+int selinux_xfrm_flow_state_match(struct flowi *fl, struct xfrm_state *xfrm,
+                                 struct xfrm_policy *xp)
 {
        int rc = 0;
        u32 sel_sid = SECINITSID_UNLABELED;
        struct xfrm_sec_ctx *ctx;
 
+       if (!xp->security)
+               if (!xfrm->security)
+                       return 1;
+               else
+                       return 0;
+       else
+               if (!xfrm->security)
+                       return 0;
+
        /* Context sid is either set to label or ANY_ASSOC */
        if ((ctx = xfrm->security)) {
                if (!selinux_authorizable_ctx(ctx))